merge with main

Lincoln Stein 2024-02-02 12:35:24 -05:00
commit 4c5aedbcba
717 changed files with 12625 additions and 13666 deletions


@ -6,10 +6,6 @@ title: '[bug]: '
labels: ['bug']
# assignees:
# - moderator_bot
# - lstein
body:
- type: markdown
attributes:
@ -18,10 +14,9 @@ body:
- type: checkboxes
attributes:
label: Is there an existing issue for this?
label: Is there an existing issue for this problem?
description: |
Please use the [search function](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
first to see if an issue already exists for the bug you encountered.
Please [search](https://github.com/invoke-ai/InvokeAI/issues) first to see if an issue already exists for the problem.
options:
- label: I have searched the existing issues
required: true
@ -33,80 +28,119 @@ body:
- type: dropdown
id: os_dropdown
attributes:
label: OS
description: Which operating system did you use when the bug occurred?
label: Operating system
description: Your computer's operating system.
multiple: false
options:
- 'Linux'
- 'Windows'
- 'macOS'
- 'other'
validations:
required: true
- type: dropdown
id: gpu_dropdown
attributes:
label: GPU
description: Which kind of graphics adapter is your system using?
label: GPU vendor
description: Your GPU's vendor.
multiple: false
options:
- 'cuda'
- 'amd'
- 'mps'
- 'cpu'
- 'Nvidia (CUDA)'
- 'AMD (ROCm)'
- 'Apple Silicon (MPS)'
- 'None (CPU)'
validations:
required: true
- type: input
id: gpu_model
attributes:
label: GPU model
description: Your GPU's model. If on Apple Silicon, this is your Mac's chip. Leave blank if on CPU.
placeholder: ex. RTX 2080 Ti, Mac M1 Pro
validations:
required: false
- type: input
id: vram
attributes:
label: VRAM
description: Size of the VRAM if known
label: GPU VRAM
description: Your GPU's VRAM. If on Apple Silicon, this is your Mac's unified memory. Leave blank if on CPU.
placeholder: 8GB
validations:
required: false
- type: input
id: version-number
attributes:
label: What version did you experience this issue on?
label: Version number
description: |
Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
placeholder: X.X.X
The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
placeholder: ex. 3.6.1
validations:
required: true
- type: input
id: browser-version
attributes:
label: Browser
description: Your web browser and version.
placeholder: ex. Firefox 123.0b3
validations:
required: true
- type: textarea
id: python-deps
attributes:
label: Python dependencies
description: |
If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
validations:
required: false
- type: textarea
id: what-happened
attributes:
label: What happened?
label: What happened
description: |
Briefly describe what happened, what you expected to happen and how to reproduce this bug.
placeholder: When using the web interface and right-clicking on button X, error Y appears instead of the popup menu
Describe what happened. Include any relevant error messages, stack traces and screenshots here.
placeholder: I clicked button X and then Y happened.
validations:
required: true
- type: textarea
id: what-you-expected
attributes:
label: Screenshots
description: If applicable, add screenshots to help explain your problem
placeholder: this is what the result looked like <screenshot>
label: What you expected to happen
description: Describe what you expected to happen.
placeholder: I expected Z to happen.
validations:
required: true
- type: textarea
id: how-to-repro
attributes:
label: How to reproduce the problem
description: List steps to reproduce the problem.
placeholder: Start the app, generate an image with these settings, then click button X.
validations:
required: false
- type: textarea
id: additional-context
attributes:
label: Additional context
description: Add any other context about the problem here
description: Any other context that might help us to understand the problem.
placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
validations:
required: false
- type: input
id: contact
id: discord-username
attributes:
label: Contact Details
description: __OPTIONAL__ How can we get in touch with you if we need more info (besides this issue)?
placeholder: ex. email@example.com, discordname, twitter, ...
label: Discord username
description: If you are on the Invoke discord and would prefer to be contacted there, please provide your username.
placeholder: supercoolusername123
validations:
required: false

59
.github/pr_labels.yml vendored Normal file

@ -0,0 +1,59 @@
Root:
  - changed-files:
      - any-glob-to-any-file: '*'
PythonDeps:
  - changed-files:
      - any-glob-to-any-file: 'pyproject.toml'
Python:
  - changed-files:
      - all-globs-to-any-file:
          - 'invokeai/**'
          - '!invokeai/frontend/web/**'
PythonTests:
  - changed-files:
      - any-glob-to-any-file: 'tests/**'
CICD:
  - changed-files:
      - any-glob-to-any-file: .github/**
Docker:
  - changed-files:
      - any-glob-to-any-file: docker/**
Installer:
  - changed-files:
      - any-glob-to-any-file: installer/**
Documentation:
  - changed-files:
      - any-glob-to-any-file: docs/**
Invocations:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/app/invocations/**'
Backend:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/backend/**'
Api:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/app/api/**'
Services:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/app/services/**'
FrontendDeps:
  - changed-files:
      - any-glob-to-any-file:
          - '**/*/package.json'
          - '**/*/pnpm-lock.yaml'
Frontend:
  - changed-files:
      - any-glob-to-any-file: 'invokeai/frontend/web/**'

16
.github/workflows/label-pr.yml vendored Normal file

@ -0,0 +1,16 @@
name: "Pull Request Labeler"
on:
- pull_request_target
jobs:
labeler:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/labeler@v5
with:
configuration-path: .github/pr_labels.yml


@ -169,7 +169,7 @@ the command `npm install -g pnpm` if needed)
_For Linux with an AMD GPU:_
```sh
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
```
_For non-GPU systems:_


@ -1,76 +0,0 @@
# Contributing to the Frontend
# InvokeAI Web UI
- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
- [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
- [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
- [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
- [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)
The UI is a fairly straightforward TypeScript React app, with the Unified Canvas being more complex.
Code is located in `invokeai/frontend/web/` for review.
## Stack
State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:
- `createAsyncThunk` for HTTP requests
- `createEntityAdapter` for fetching images and models
- `createListenerMiddleware` for workflows
The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.
Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).
[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) for components and styling.
[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.
[Vite](https://vitejs.dev/) for bundling.
Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.
## Contributing
Thanks for your interest in contributing to the InvokeAI Web UI!
We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
### Dev Environment
**Setup**
1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:
```bash
node --version
```
2. Install [pnpm](https://pnpm.io/) and confirm it is installed by running this:
```bash
npm install --global pnpm
pnpm --version
```
From `invokeai/frontend/web/` run `pnpm install` to get everything set up.
Start everything in dev mode:
1. Ensure your virtual environment is running
2. Start the dev server: `pnpm dev`
3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
4. Point your browser to the dev server address e.g. [http://localhost:5173/](http://localhost:5173/)
### VSCode Remote Dev
We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. We suggest disabling the IDE's port forwarding feature and doing it manually via SSH:
`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`
### Production builds
For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.
If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.
To build for production, run `pnpm build`.


@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
Once you're setup, for more information, you can review the documentation specific to your area of interest:
* #### [InvokeAI Architecture](../ARCHITECTURE.md)
* #### [Frontend Documentation](./contributingToFrontend.md)
* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
* #### [Node Documentation](../INVOCATIONS.md)
* #### [Local Development](../LOCAL_DEVELOPMENT.md)

BIN
docs/img/favicon.ico Normal file

Binary file not shown.



@ -117,6 +117,11 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
## :octicons-gift-24: InvokeAI Features
### Installation
- [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
- [Manual Installation](installation/020_INSTALL_MANUAL.md)
- [Docker Installation](installation/040_INSTALL_DOCKER.md)
### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)


@ -477,7 +477,7 @@ Then type the following commands:
=== "AMD System"
```bash
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.6
```
### Corrupted configuration file


@ -154,7 +154,7 @@ manager, please follow these steps:
=== "ROCm (AMD)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
```
=== "CPU (Intel Macs & non-GPU systems)"
@ -313,7 +313,7 @@ code for InvokeAI. For this to work, you will need to install the
on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git)
You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md).
You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md).
If you have a "normal" installation, you should create a totally separate virtual environment for the git-based installation, else the two may interfere.
@ -345,7 +345,7 @@ installation protocol (important!)
=== "ROCm (AMD)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
```
=== "CPU (Intel Macs & non-GPU systems)"
@ -361,7 +361,7 @@ installation protocol (important!)
Be sure to pass `-e` (for an editable install) and don't forget the
dot ("."). It is part of the command.
5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/docs/contributing/contribution_guides/contributingToFrontend.md) and do a production build of the UI as described.
5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md) and do a production build of the UI as described.
6. You can now run `invokeai` and its related commands. The code will be
read from the repository, so that you can edit the .py source files


@ -134,7 +134,7 @@ recipes are available
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
https://download.pytorch.org/whl/rocm5.6` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
This will be done automatically for you if you use the installer


@ -18,13 +18,18 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).
## **[Automated Installer](010_INSTALL_AUTOMATED.md)**
✅ This is the recommended installation method for first-time users.
## **[Automated Installer (Recommended)](010_INSTALL_AUTOMATED.md)**
✅ This is the recommended installation method for first-time users.
This is a script that will install all of InvokeAI's essential
third party libraries and InvokeAI itself. It includes access to a
"developer console" which will help us debug problems with you and
give you access to experimental features.
third party libraries and InvokeAI itself.
🖥️ **Download the latest installer .zip file here:** https://github.com/invoke-ai/InvokeAI/releases/latest
- *Look for the file labelled "InvokeAI-installer-v3.X.X.zip" at the bottom of the page*
- If you experience issues, read through the full [installation instructions](010_INSTALL_AUTOMATED.md) to make sure you have met all of the installation requirements. If you need more help, join the [Discord](https://discord.gg/invoke-ai) or create an issue on [GitHub](https://github.com/invoke-ai/InvokeAI).
## **[Manual Installation](020_INSTALL_MANUAL.md)**
This method is recommended for experienced users and developers.


@ -25,6 +25,7 @@ To use a community workflow, download the `.json` node graph file and load i
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
+ [Grid to Gif](#grid-to-gif)
+ [Halftone](#halftone)
+ [Hand Refiner with MeshGraphormer](#hand-refiner-with-meshgraphormer)
+ [Image and Mask Composition Pack](#image-and-mask-composition-pack)
+ [Image Dominant Color](#image-dominant-color)
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
@ -196,6 +197,18 @@ CMYK Halftone Output:
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />
--------------------------------
### Hand Refiner with MeshGraphormer
**Description:** Hand Refiner takes your image and automatically generates a depth map for the hands, along with a mask of the hands region, which you can conveniently use with ControlNet to fix the wonky hands generated by Stable Diffusion.
**Node Link:** https://github.com/blessedcoolant/invoke_meshgraphormer
**View**
<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_meshgraphormer/main/assets/preview.jpg" />
--------------------------------
### Image and Mask Composition Pack
**Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling.


@ -13,46 +13,69 @@ We thank them for all of their time and hard work.
- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
## **Current core team**
## **Current Core Team**
* @lstein (Lincoln Stein) - Co-maintainer
* @blessedcoolant - Co-maintainer
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
* @psychedelicious (Spencer Mabrito) - Web Team Leader
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* @damian0815 - Attention Systems and Compel Maintainer
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @genomancer (Gregg Helt) - Controlnet support
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
* @josh is toast (Josh Corbett) - Web Development
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* @sunija - Standalone version
* @genomancer (Gregg Helt) - Controlnet support
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
* @ryanjdick (Ryan Dick) - Machine Learning & Training
* @millu (Millun Atluri) - Community Manager, Documentation, Node-wrangler
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
* @JPPhoto - Core image generation nodes
* @dunkeroni - Image generation backend
* @SkunkWorxDark - Image generation backend
* @keturn (Kevin Turner) - Diffusers
* @millu (Millun Atluri) - Community Wizard, Documentation, Node-wrangler
* @glimmerleaf (Devon Hopkins) - Community Wizard
* @gogurt enjoyer - Discord moderator and end user support
* @whosawhatsis - Discord moderator and end user support
* @dwinrger - Discord moderator and end user support
* @526christian - Discord moderator and end user support
* @harvester62 - Discord moderator and end user support
## **Honored Team Alumni**
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
* @damian0815 - Attention Systems and Compel Maintainer
* @netsvetaev (Artur) - Localization support
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* @tildebyte - Installation and configuration
* @mauwii (Matthias Wilde) - Installation, release, continuous integration
## **Full List of Contributors by Commit Name**
- 이승석
- AbdBarho
- ablattmann
- AdamOStark
- Adam Rice
- Airton Silva
- Aldo Hoeben
- Alexander Eichhorn
- Alexandre D. Roberge
- Alexandre Macabies
- Alfie John
- Andreas Rozek
- Andre LaBranche
- Andy Bearman
- Andy Luhrs
- Andy Pilate
- Anonymous
- Anthony Monthe
- Any-Winter-4079
- apolinario
- Ar7ific1al
- ArDiouscuros
- Armando C. Santisbon
- Arnold Cordewiner
- Arthur Holstvoogd
- artmen1516
- Artur
@ -64,13 +87,16 @@ We thank them for all of their time and hard work.
- blhook
- BlueAmulet
- Bouncyknighter
- Brandon
- Brandon Rising
- Brent Ozar
- Brian Racer
- bsilvereagle
- c67e708d
- camenduru
- CapableWeb
- Carson Katri
- chainchompa
- Chloe
- Chris Dawson
- Chris Hayes
@ -86,30 +112,45 @@ We thank them for all of their time and hard work.
- cpacker
- Cragin Godley
- creachec
- CrypticWit
- d8ahazard
- damian
- damian0815
- Damian at mba
- Damian Stewart
- Daniel Manzke
- Danny Beer
- Dan Sully
- Darren Ringer
- David Burnett
- David Ford
- David Regla
- David Sisco
- David Wager
- Daya Adianto
- db3000
- DekitaRPG
- Denis Olshin
- Dennis
- dependabot[bot]
- Dmitry Parnas
- Dobrynia100
- Dominic Letz
- DrGunnarMallon
- Drun555
- dunkeroni
- Edward Johan
- elliotsayes
- Elrik
- ElrikUnderlake
- Eric Khun
- Eric Wolf
- Eugene
- Eugene Brodsky
- ExperimentalCyborg
- Fabian Bahl
- Fabio 'MrWHO' Torchetti
- Fattire
- fattire
- Felipe Nogueira
- Félix Sanz
@ -118,8 +159,12 @@ We thank them for all of their time and hard work.
- gabrielrotbart
- gallegonovato
- Gérald LONLAS
- Gille
- GitHub Actions Bot
- glibesyck
- gogurtenjoyer
- Gohsuke Shimada
- greatwolf
- greentext2
- Gregg Helt
- H4rk
@ -131,6 +176,7 @@ We thank them for all of their time and hard work.
- Hosted Weblate
- Iman Karim
- ismail ihsan bülbül
- ItzAttila
- Ivan Efimov
- jakehl
- Jakub Kolčář
@ -141,6 +187,7 @@ We thank them for all of their time and hard work.
- Jason Toffaletti
- Jaulustus
- Jeff Mahoney
- Jennifer Player
- jeremy
- Jeremy Clark
- JigenD
@ -148,19 +195,26 @@ We thank them for all of their time and hard work.
- Johan Roxendal
- Johnathon Selstad
- Jonathan
- Jordan Hewitt
- Joseph Dries III
- Josh Corbett
- JPPhoto
- jspraul
- junzi
- Justin Wong
- Juuso V
- Kaspar Emanuel
- Katsuyuki-Karasawa
- Keerigan45
- Kent Keirsey
- Kevin Brack
- Kevin Coakley
- Kevin Gibbons
- Kevin Schaul
- Kevin Turner
- Kieran Klaassen
- krummrey
- Kyle
- Kyle Lacy
- Kyle Schouviller
- Lawrence Norton
@ -171,10 +225,15 @@ We thank them for all of their time and hard work.
- Lynne Whitehorn
- majick
- Marco Labarile
- Marta Nahorniuk
- Martin Kristiansen
- Mary Hipp
- maryhipp
- Mary Hipp Rogers
- mastercaster
- mastercaster9000
- Matthias Wild
- mauwii
- michaelk71
- mickr777
- Mihai
@ -182,11 +241,15 @@ We thank them for all of their time and hard work.
- Mikhail Tishin
- Millun Atluri
- Minjune Song
- Mitchell Allain
- mitien
- mofuzz
- Muhammad Usama
- Name
- _nderscore
- Neil Wang
- nekowaiz
- nemuruibai
- Netzer R
- Nicholas Koh
- Nicholas Körfer
@ -197,9 +260,11 @@ We thank them for all of their time and hard work.
- ofirkris
- Olivier Louvignes
- owenvincent
- pand4z31
- Patrick Esser
- Patrick Tien
- Patrick von Platen
- Paul Curry
- Paul Sajna
- pejotr
- Peter Baylies
@ -207,6 +272,7 @@ We thank them for all of their time and hard work.
- plucked
- prixt
- psychedelicious
- psychedelicious@windows
- Rainer Bernhardt
- Riccardo Giovanetti
- Rich Jones
@ -215,17 +281,22 @@ We thank them for all of their time and hard work.
- Robert Bolender
- Robin Rombach
- Rohan Barar
- rohinish404
- Rohinish
- rpagliuca
- rromb
- Rupesh Sreeraman
- Ryan
- Ryan Cao
- Ryan Dick
- Saifeddine
- Saifeddine ALOUI
- Sam
- SammCheese
- Sam McLeod
- Sammy
- sammyf
- Samuel Husso
- Saurav Maheshkar
- Scott Lahteine
- Sean McLellan
- Sebastian Aigner
@ -233,16 +304,21 @@ We thank them for all of their time and hard work.
- Sergey Krashevich
- Shapor Naghibzadeh
- Shawn Zhong
- Simona Liliac
- Simon Vans-Colina
- skunkworxdark
- slashtechno
- SoheilRezaei
- Song, Pengcheng
- spezialspezial
- ssantos
- StAlKeR7779
- Stefan Tobler
- Stephan Koglin-Fischer
- SteveCaruso
- Steve Martinelli
- Steven Frank
- Surisen
- System X - Files
- Taylor Kems
- techicode
@ -261,26 +337,34 @@ We thank them for all of their time and hard work.
- tyler
- unknown
- user1
- vedant-3010
- Vedant Madane
- veprogames
- wa.code
- wfng92
- whjms
- whosawhatsis
- Will
- William Becher
- William Chong
- Wilson E. Alvarez
- woweenie
- Wubbbi
- xra
- Yeung Yiu Hung
- ymgenesis
- Yorzaren
- Yosuke Shinya
- yun saki
- ZachNagengast
- Zadagu
- zeptofine
- Zerdoumi
- Васянатор
- 冯不游
- 唐澤 克幸
## **Original CompVis Authors**
## **Original CompVis (Stable Diffusion) Authors**
- [Robin Rombach](https://github.com/rromb)
- [Patrick von Platen](https://github.com/patrickvonplaten)

File diff suppressed because it is too large.


@ -455,7 +455,7 @@ def get_torch_source() -> (Union[str, None], str):
optional_modules = "[onnx]"
if OS == "Linux":
if device == "rocm":
url = "https://download.pytorch.org/whl/rocm5.4.2"
url = "https://download.pytorch.org/whl/rocm5.6"
elif device == "cpu":
url = "https://download.pytorch.org/whl/cpu"


@ -2,6 +2,7 @@
from logging import Logger
from invokeai.app.services.item_storage.item_storage_memory import ItemStorageMemory
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
from invokeai.backend.model_manager.metadata import ModelMetadataStore
from invokeai.backend.util.logging import InvokeAILogger
@ -22,7 +23,6 @@ from ..services.invocation_queue.invocation_queue_memory import MemoryInvocation
from ..services.invocation_services import InvocationServices
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
from ..services.invoker import Invoker
from ..services.item_storage.item_storage_sqlite import SqliteItemStorage
from ..services.latents_storage.latents_storage_disk import DiskLatentsStorage
from ..services.latents_storage.latents_storage_forward_cache import ForwardCacheLatentsStorage
from ..services.model_install import ModelInstallService
@ -80,7 +80,7 @@ class ApiDependencies:
board_records = SqliteBoardRecordStorage(db=db)
boards = BoardService()
events = FastAPIEventService(event_handler_id)
graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions")
graph_execution_manager = ItemStorageMemory[GraphExecutionState]()
image_records = SqliteImageRecordStorage(db=db)
images = ImageService()
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)


@ -1,7 +1,7 @@
# Copyright (c) 2023 Lincoln D. Stein
"""FastAPI route for model configuration records."""
import pathlib
from hashlib import sha1
from random import randbytes
from typing import Any, Dict, List, Optional, Set
@ -27,6 +27,7 @@ from invokeai.backend.model_manager.config import (
ModelFormat,
ModelType,
)
from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
from ..dependencies import ApiDependencies
@ -415,3 +416,57 @@ async def sync_models_to_config() -> Response:
"""
ApiDependencies.invoker.services.model_install.sync_to_config()
return Response(status_code=204)
@model_records_router.put(
    "/merge",
    operation_id="merge",
)
async def merge(
    keys: List[str] = Body(description="Keys for two to three models to merge", min_length=2, max_length=3),
    merged_model_name: Optional[str] = Body(description="Name of destination model", default=None),
    alpha: float = Body(description="Alpha weighting strength to apply to 2d and 3d models", default=0.5),
    force: bool = Body(
        description="Force merging of models created with different versions of diffusers",
        default=False,
    ),
    interp: Optional[MergeInterpolationMethod] = Body(description="Interpolation method", default=None),
    merge_dest_directory: Optional[str] = Body(
        description="Save the merged model to the designated directory (with 'merged_model_name' appended)",
        default=None,
    ),
) -> AnyModelConfig:
    """
    Merge diffusers models.

    keys: List of 2-3 model keys to merge together. All models must use the same base type.
    merged_model_name: Name for the merged model [Concat model names]
    alpha: Alpha value (0.0-1.0). Higher values give more weight to the second model [0.5]
    force: If true, force the merge even if the models were generated by different versions of the diffusers library [False]
    interp: Interpolation method. One of "weighted_sum", "sigmoid", "inv_sigmoid" or "add_difference" [weighted_sum]
    merge_dest_directory: Specify a directory to store the merged model in [models directory]
    """
    logger = ApiDependencies.invoker.services.logger
    try:
        logger.info(f"Merging models: {keys} into {merge_dest_directory or '<MODELS>'}/{merged_model_name}")
        dest = pathlib.Path(merge_dest_directory) if merge_dest_directory else None
        installer = ApiDependencies.invoker.services.model_install
        merger = ModelMerger(installer)
        model_names = [installer.record_store.get_model(x).name for x in keys]
        response = merger.merge_diffusion_models_and_save(
            model_keys=keys,
            merged_model_name=merged_model_name or "+".join(model_names),
            alpha=alpha,
            interp=interp,
            force=force,
            merge_dest_directory=dest,
        )
    except UnknownModelException:
        raise HTTPException(
            status_code=404,
            detail=f"One or more of the models '{keys}' not found",
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    return response
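For reference, a minimal client-side sketch of calling this route. The host, URL prefix, and model keys are assumptions — check where `model_records_router` is mounted in your deployment:

```python
import requests

# Hypothetical host/prefix and model keys — adjust for your install.
response = requests.put(
    "http://localhost:9090/api/v1/model/record/merge",
    json={
        "keys": ["key-of-model-a", "key-of-model-b"],
        "merged_model_name": "my-merged-model",
        "alpha": 0.5,  # 0.0-1.0; higher favors the second model
        "interp": "weighted_sum",  # or "sigmoid", "inv_sigmoid", "add_difference"
        "force": False,
    },
)
response.raise_for_status()
print(response.json())  # the merged model's AnyModelConfig
```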


@ -30,6 +30,7 @@ from invokeai.app.invocations.primitives import ImageField, ImageOutput
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
from invokeai.app.shared.fields import FieldDescriptions
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
from ...backend.model_management import BaseModelType
from .baseinvocation import (
@ -602,3 +603,33 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
color_map = Image.fromarray(color_map)
return color_map
DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]


@invocation(
    "depth_anything_image_processor",
    title="Depth Anything Processor",
    tags=["controlnet", "depth", "depth anything"],
    category="controlnet",
    version="1.0.0",
)
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
    """Generates a depth map based on the Depth Anything algorithm"""

    model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
        default="small", description="The size of the depth model to use"
    )
    resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
    offload: bool = InputField(default=False)

    def run_processor(self, image):
        depth_anything_detector = DepthAnythingDetector()
        depth_anything_detector.load_model(model_size=self.model_size)

        if image.mode == "RGBA":
            image = image.convert("RGB")

        processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
        return processed_image
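A standalone sketch of the same detector calls used in `run_processor` above, outside the invocation framework (the input and output file names are hypothetical):

```python
from PIL import Image

from invokeai.backend.image_util.depth_anything import DepthAnythingDetector

detector = DepthAnythingDetector()
detector.load_model(model_size="small")  # "large" | "base" | "small"

image = Image.open("input.png")  # hypothetical input image
if image.mode == "RGBA":
    image = image.convert("RGB")

depth_map = detector(image=image, resolution=512, offload=False)
depth_map.save("depth.png")
```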


@ -251,7 +251,11 @@ class InvokeAIAppConfig(InvokeAISettings):
log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging)
log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging)
# Development
dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development)
profile_graphs : bool = Field(default=False, description="Enable graph profiling", json_schema_extra=Categories.Development)
profile_prefix : Optional[str] = Field(default=None, description="An optional prefix for profile output files.", json_schema_extra=Categories.Development)
profiles_dir : Path = Field(default=Path('profiles'), description="Directory for graph profiles", json_schema_extra=Categories.Development)
version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)
@ -270,7 +274,7 @@ class InvokeAIAppConfig(InvokeAISettings):
attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
png_compress_level : int = Field(default=6, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
png_compress_level : int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
# QUEUE
max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
@ -280,6 +284,9 @@ class InvokeAIAppConfig(InvokeAISettings):
deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)
# MODEL IMPORT
civitai_api_key : Optional[str] = Field(default=os.environ.get("CIVITAI_API_KEY"), description="API key for CivitAI", json_schema_extra=Categories.Other)
# DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAIN VALUES FROM PRE-3.1 CONFIG FILES
always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
@ -289,6 +296,7 @@ class InvokeAIAppConfig(InvokeAISettings):
lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
# this is not referred to in the source code and can be removed entirely
#free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
@ -449,6 +457,11 @@ class InvokeAIAppConfig(InvokeAISettings):
disabled_in_config = not self.xformers_enabled
return disabled_in_config and self.attention_type != "xformers"
@property
def profiles_path(self) -> Path:
"""Path to the graph profiles directory."""
return self._resolve(self.profiles_dir)
@staticmethod
def find_root() -> Path:
"""Choose the runtime root directory when not specified on command line or init file."""


@ -208,7 +208,6 @@ class DownloadQueueService(DownloadQueueServiceBase):
job = self._queue.get(timeout=1)
except Empty:
continue
try:
job.job_started = get_iso_timestamp()
self._do_download(job)


@ -1,11 +1,16 @@
import time
import traceback
from contextlib import suppress
from threading import BoundedSemaphore, Event, Thread
from typing import Optional
import invokeai.backend.util.logging as logger
from invokeai.app.invocations.baseinvocation import InvocationContext
from invokeai.app.services.invocation_queue.invocation_queue_common import InvocationQueueItem
from invokeai.app.services.invocation_stats.invocation_stats_common import (
GESStatsNotFoundError,
)
from invokeai.app.util.profiler import Profiler
from ..invoker import Invoker
from .invocation_processor_base import InvocationProcessorABC
@ -18,7 +23,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
__invoker: Invoker
__threadLimit: BoundedSemaphore
def start(self, invoker) -> None:
def start(self, invoker: Invoker) -> None:
# if we do want multithreading at some point, we could make this configurable
self.__threadLimit = BoundedSemaphore(1)
self.__invoker = invoker
@ -39,6 +44,16 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
self.__threadLimit.acquire()
queue_item: Optional[InvocationQueueItem] = None
profiler = (
Profiler(
logger=self.__invoker.services.logger,
output_dir=self.__invoker.services.configuration.profiles_path,
prefix=self.__invoker.services.configuration.profile_prefix,
)
if self.__invoker.services.configuration.profile_graphs
else None
)
while not stop_event.is_set():
try:
queue_item = self.__invoker.services.queue.get()
@ -49,6 +64,10 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
# do not hammer the queue
time.sleep(0.5)
continue
if profiler and profiler.profile_id != queue_item.graph_execution_state_id:
profiler.start(profile_id=queue_item.graph_execution_state_id)
try:
graph_execution_state = self.__invoker.services.graph_execution_manager.get(
queue_item.graph_execution_state_id
@ -137,7 +156,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
pass
except CanceledException:
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
with suppress(GESStatsNotFoundError):
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
pass
except Exception as e:
@ -162,7 +182,8 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
error_type=e.__class__.__name__,
error=error,
)
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
with suppress(GESStatsNotFoundError):
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
pass
# Check queue to see if this is canceled, and skip if so
@ -194,13 +215,21 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
error=traceback.format_exc(),
)
elif is_complete:
self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
self.__invoker.services.events.emit_graph_execution_complete(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id,
)
with suppress(GESStatsNotFoundError):
self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
self.__invoker.services.events.emit_graph_execution_complete(
queue_batch_id=queue_item.session_queue_batch_id,
queue_item_id=queue_item.session_queue_item_id,
queue_id=queue_item.session_queue_id,
graph_execution_state_id=graph_execution_state.id,
)
if profiler:
profile_path = profiler.stop()
stats_path = profile_path.with_suffix(".json")
self.__invoker.services.performance_statistics.dump_stats(
graph_execution_state_id=graph_execution_state.id, output_path=stats_path
)
self.__invoker.services.performance_statistics.reset_stats(graph_execution_state.id)
except KeyboardInterrupt:
pass # Log something? KeyboardInterrupt is probably not going to be seen by the processor
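Pulled out of the processor loop, the new profiling flow is easier to follow. A sketch using the names from this diff, where `services` and `session_id` are hypothetical stand-ins for the invoker's services and the graph execution state id:

```python
profiler = Profiler(
    logger=services.logger,
    output_dir=services.configuration.profiles_path,
    prefix=services.configuration.profile_prefix,
)

profiler.start(profile_id=session_id)
# ... the graph executes ...
profile_path = profiler.stop()  # writes the profile output and returns its path

# Dump the matching stats summary as JSON next to the profile, then reset.
stats_path = profile_path.with_suffix(".json")
services.performance_statistics.dump_stats(
    graph_execution_state_id=session_id, output_path=stats_path
)
services.performance_statistics.reset_stats(session_id)
```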


@ -30,8 +30,10 @@ writes to the system log is stored in InvocationServices.performance_statistics.
from abc import ABC, abstractmethod
from contextlib import AbstractContextManager
from pathlib import Path
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.invocation_stats.invocation_stats_common import InvocationStatsSummary
class InvocationStatsServiceBase(ABC):
@ -61,8 +63,9 @@ class InvocationStatsServiceBase(ABC):
@abstractmethod
def reset_stats(self, graph_execution_state_id: str):
"""
Reset all statistics for the indicated graph
:param graph_execution_state_id
Reset all statistics for the indicated graph.
:param graph_execution_state_id: The id of the session whose stats to reset.
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
"""
pass
@ -70,5 +73,26 @@ class InvocationStatsServiceBase(ABC):
def log_stats(self, graph_execution_state_id: str):
"""
Write out the accumulated statistics to the log or somewhere else.
:param graph_execution_state_id: The id of the session whose stats to log.
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
"""
pass
@abstractmethod
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
"""
Gets the accumulated statistics for the indicated graph.
:param graph_execution_state_id: The id of the session whose stats to get.
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
"""
pass
@abstractmethod
def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
"""
Write out the accumulated statistics to the indicated path as JSON.
:param graph_execution_state_id: The id of the session whose stats to dump.
:param output_path: The file to write the stats to.
:raises GESStatsNotFoundError: if the graph isn't tracked in the stats.
"""
pass
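A sketch of the contract these methods document, with `stats_service` and `session_id` as hypothetical stand-ins:

```python
from pathlib import Path

try:
    summary = stats_service.get_stats(session_id)
    print(summary)  # human-readable report via InvocationStatsSummary.__str__
    stats_service.dump_stats(session_id, output_path=Path("stats.json"))
    stats_service.reset_stats(session_id)
except GESStatsNotFoundError:
    pass  # this session was never tracked, or was already reset
```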


@ -1,5 +1,91 @@
from collections import defaultdict
from dataclasses import dataclass
from dataclasses import asdict, dataclass
from typing import Any, Optional
class GESStatsNotFoundError(Exception):
    """Raised when execution stats are not found for a given Graph Execution State."""


@dataclass
class NodeExecutionStatsSummary:
    """The stats for a specific type of node."""

    node_type: str
    num_calls: int
    time_used_seconds: float
    peak_vram_gb: float


@dataclass
class ModelCacheStatsSummary:
    """The stats for the model cache."""

    high_water_mark_gb: float
    cache_size_gb: float
    total_usage_gb: float
    cache_hits: int
    cache_misses: int
    models_cached: int
    models_cleared: int


@dataclass
class GraphExecutionStatsSummary:
    """The stats for the graph execution state."""

    graph_execution_state_id: str
    execution_time_seconds: float
    # `wall_time_seconds`, `ram_usage_gb` and `ram_change_gb` are derived from the node execution stats.
    # In some situations, there are no node stats, so these values are optional.
    wall_time_seconds: Optional[float]
    ram_usage_gb: Optional[float]
    ram_change_gb: Optional[float]


@dataclass
class InvocationStatsSummary:
    """
    The accumulated stats for a graph execution.
    Its `__str__` method returns a human-readable stats summary.
    """

    vram_usage_gb: Optional[float]
    graph_stats: GraphExecutionStatsSummary
    model_cache_stats: ModelCacheStatsSummary
    node_stats: list[NodeExecutionStatsSummary]

    def __str__(self) -> str:
        _str = ""
        _str = f"Graph stats: {self.graph_stats.graph_execution_state_id}\n"
        _str += f"{'Node':>30} {'Calls':>7} {'Seconds':>9} {'VRAM Used':>10}\n"

        for summary in self.node_stats:
            _str += f"{summary.node_type:>30} {summary.num_calls:>7} {summary.time_used_seconds:>8.3f}s {summary.peak_vram_gb:>9.3f}G\n"

        _str += f"TOTAL GRAPH EXECUTION TIME: {self.graph_stats.execution_time_seconds:7.3f}s\n"

        if self.graph_stats.wall_time_seconds is not None:
            _str += f"TOTAL GRAPH WALL TIME: {self.graph_stats.wall_time_seconds:7.3f}s\n"

        if self.graph_stats.ram_usage_gb is not None and self.graph_stats.ram_change_gb is not None:
            _str += f"RAM used by InvokeAI process: {self.graph_stats.ram_usage_gb:4.2f}G ({self.graph_stats.ram_change_gb:+5.3f}G)\n"

        _str += f"RAM used to load models: {self.model_cache_stats.total_usage_gb:4.2f}G\n"
        if self.vram_usage_gb:
            _str += f"VRAM in use: {self.vram_usage_gb:4.3f}G\n"
        _str += "RAM cache statistics:\n"
        _str += f"  Model cache hits: {self.model_cache_stats.cache_hits}\n"
        _str += f"  Model cache misses: {self.model_cache_stats.cache_misses}\n"
        _str += f"  Models cached: {self.model_cache_stats.models_cached}\n"
        _str += f"  Models cleared from cache: {self.model_cache_stats.models_cleared}\n"
        _str += f"  Cache high water mark: {self.model_cache_stats.high_water_mark_gb:4.2f}/{self.model_cache_stats.cache_size_gb:4.2f}G\n"

        return _str

    def as_dict(self) -> dict[str, Any]:
        """Returns the stats as a dictionary."""
        return asdict(self)
@dataclass
@ -55,12 +141,33 @@ class GraphExecutionStats:
return last_node
def get_pretty_log(self, graph_execution_state_id: str) -> str:
log = f"Graph stats: {graph_execution_state_id}\n"
log += f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}\n"
def get_graph_stats_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
"""Get a summary of the graph stats."""
first_node = self.get_first_node_stats()
last_node = self.get_last_node_stats()
# Log stats aggregated by node type.
wall_time_seconds: Optional[float] = None
ram_usage_gb: Optional[float] = None
ram_change_gb: Optional[float] = None
if last_node and first_node:
wall_time_seconds = last_node.end_time - first_node.start_time
ram_usage_gb = last_node.end_ram_gb
ram_change_gb = last_node.end_ram_gb - first_node.start_ram_gb
return GraphExecutionStatsSummary(
graph_execution_state_id=graph_execution_state_id,
execution_time_seconds=self.get_total_run_time(),
wall_time_seconds=wall_time_seconds,
ram_usage_gb=ram_usage_gb,
ram_change_gb=ram_change_gb,
)
def get_node_stats_summaries(self) -> list[NodeExecutionStatsSummary]:
"""Get a summary of the node stats."""
summaries: list[NodeExecutionStatsSummary] = []
node_stats_by_type: dict[str, list[NodeExecutionStats]] = defaultdict(list)
for node_stats in self._node_stats_list:
node_stats_by_type[node_stats.invocation_type].append(node_stats)
@ -68,17 +175,9 @@ class GraphExecutionStats:
num_calls = len(node_type_stats_list)
time_used = sum([n.total_time() for n in node_type_stats_list])
peak_vram = max([n.peak_vram_gb for n in node_type_stats_list])
log += f"{node_type:>30} {num_calls:>4} {time_used:7.3f}s {peak_vram:4.3f}G\n"
summary = NodeExecutionStatsSummary(
node_type=node_type, num_calls=num_calls, time_used_seconds=time_used, peak_vram_gb=peak_vram
)
summaries.append(summary)
# Log stats for the entire graph.
log += f"TOTAL GRAPH EXECUTION TIME: {self.get_total_run_time():7.3f}s\n"
first_node = self.get_first_node_stats()
last_node = self.get_last_node_stats()
if first_node is not None and last_node is not None:
total_wall_time = last_node.end_time - first_node.start_time
ram_change = last_node.end_ram_gb - first_node.start_ram_gb
log += f"TOTAL GRAPH WALL TIME: {total_wall_time:7.3f}s\n"
log += f"RAM used by InvokeAI process: {last_node.end_ram_gb:4.2f}G ({ram_change:+5.3f}G)\n"
return log
return summaries


@ -1,5 +1,7 @@
import json
import time
from contextlib import contextmanager
from pathlib import Path
import psutil
import torch
@ -10,7 +12,15 @@ from invokeai.app.services.invoker import Invoker
from invokeai.backend.model_management.model_cache import CacheStats
from .invocation_stats_base import InvocationStatsServiceBase
from .invocation_stats_common import GraphExecutionStats, NodeExecutionStats
from .invocation_stats_common import (
GESStatsNotFoundError,
GraphExecutionStats,
GraphExecutionStatsSummary,
InvocationStatsSummary,
ModelCacheStatsSummary,
NodeExecutionStats,
NodeExecutionStatsSummary,
)
# Size of 1GB in bytes.
GB = 2**30
@ -95,31 +105,66 @@ class InvocationStatsService(InvocationStatsServiceBase):
del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id]
except KeyError as e:
logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}.")
msg = f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}."
logger.error(msg)
raise GESStatsNotFoundError(msg) from e
def log_stats(self, graph_execution_state_id: str):
def get_stats(self, graph_execution_state_id: str) -> InvocationStatsSummary:
graph_stats_summary = self._get_graph_summary(graph_execution_state_id)
node_stats_summaries = self._get_node_summaries(graph_execution_state_id)
model_cache_stats_summary = self._get_model_cache_summary(graph_execution_state_id)
vram_usage_gb = torch.cuda.memory_allocated() / GB if torch.cuda.is_available() else None
return InvocationStatsSummary(
graph_stats=graph_stats_summary,
model_cache_stats=model_cache_stats_summary,
node_stats=node_stats_summaries,
vram_usage_gb=vram_usage_gb,
)
def log_stats(self, graph_execution_state_id: str) -> None:
stats = self.get_stats(graph_execution_state_id)
logger.info(str(stats))
def dump_stats(self, graph_execution_state_id: str, output_path: Path) -> None:
stats = self.get_stats(graph_execution_state_id)
with open(output_path, "w") as f:
f.write(json.dumps(stats.as_dict(), indent=2))
def _get_model_cache_summary(self, graph_execution_state_id: str) -> ModelCacheStatsSummary:
try:
graph_stats = self._stats[graph_execution_state_id]
cache_stats = self._cache_stats[graph_execution_state_id]
except KeyError as e:
logger.warning(f"Attempted to log statistics for unknown graph {graph_execution_state_id}: {e}.")
return
msg = f"Attempted to get model cache statistics for unknown graph {graph_execution_state_id}: {e}."
logger.error(msg)
raise GESStatsNotFoundError(msg) from e
log = graph_stats.get_pretty_log(graph_execution_state_id)
return ModelCacheStatsSummary(
cache_hits=cache_stats.hits,
cache_misses=cache_stats.misses,
high_water_mark_gb=cache_stats.high_watermark / GB,
cache_size_gb=cache_stats.cache_size / GB,
total_usage_gb=sum(list(cache_stats.loaded_model_sizes.values())) / GB,
models_cached=cache_stats.in_cache,
models_cleared=cache_stats.cleared,
)
hwm = cache_stats.high_watermark / GB
tot = cache_stats.cache_size / GB
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GB
log += f"RAM used to load models: {loaded:4.2f}G\n"
if torch.cuda.is_available():
log += f"VRAM in use: {(torch.cuda.memory_allocated() / GB):4.3f}G\n"
log += "RAM cache statistics:\n"
log += f" Model cache hits: {cache_stats.hits}\n"
log += f" Model cache misses: {cache_stats.misses}\n"
log += f" Models cached: {cache_stats.in_cache}\n"
log += f" Models cleared from cache: {cache_stats.cleared}\n"
log += f" Cache high water mark: {hwm:4.2f}/{tot:4.2f}G\n"
logger.info(log)
def _get_graph_summary(self, graph_execution_state_id: str) -> GraphExecutionStatsSummary:
try:
graph_stats = self._stats[graph_execution_state_id]
except KeyError as e:
msg = f"Attempted to get graph statistics for unknown graph {graph_execution_state_id}: {e}."
logger.error(msg)
raise GESStatsNotFoundError(msg) from e
del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id]
return graph_stats.get_graph_stats_summary(graph_execution_state_id)
def _get_node_summaries(self, graph_execution_state_id: str) -> list[NodeExecutionStatsSummary]:
try:
graph_stats = self._stats[graph_execution_state_id]
except KeyError as e:
msg = f"Attempted to get node statistics for unknown graph {graph_execution_state_id}: {e}."
logger.error(msg)
raise GESStatsNotFoundError(msg) from e
return graph_stats.get_node_stats_summaries()


@ -1,10 +1,8 @@
from abc import ABC, abstractmethod
from typing import Callable, Generic, Optional, TypeVar
from typing import Callable, Generic, TypeVar
from pydantic import BaseModel
from invokeai.app.services.shared.pagination import PaginatedResults
T = TypeVar("T", bound=BaseModel)
@ -25,23 +23,14 @@ class ItemStorageABC(ABC, Generic[T]):
"""Gets the item, parsing it into a Pydantic model"""
pass
@abstractmethod
def get_raw(self, item_id: str) -> Optional[str]:
"""Gets the raw item as a string, skipping Pydantic parsing"""
pass
@abstractmethod
def set(self, item: T) -> None:
"""Sets the item"""
pass
@abstractmethod
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
"""Gets a paginated list of items"""
pass
@abstractmethod
def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
def delete(self, item_id: str) -> None:
"""Deletes the item"""
pass
def on_changed(self, on_changed: Callable[[T], None]) -> None:


@ -0,0 +1,50 @@
from collections import OrderedDict
from contextlib import suppress
from typing import Generic, Optional, TypeVar

from pydantic import BaseModel

from invokeai.app.services.item_storage.item_storage_base import ItemStorageABC

T = TypeVar("T", bound=BaseModel)


class ItemStorageMemory(ItemStorageABC, Generic[T]):
    """
    Provides a simple in-memory storage for items, with a maximum number of items to store.
    The storage uses the LRU strategy to evict items from storage when the max has been reached.
    """

    def __init__(self, id_field: str = "id", max_items: int = 10) -> None:
        super().__init__()
        if max_items < 1:
            raise ValueError("max_items must be at least 1")
        if not id_field:
            raise ValueError("id_field must not be empty")
        self._id_field = id_field
        self._items: OrderedDict[str, T] = OrderedDict()
        self._max_items = max_items

    def get(self, item_id: str) -> Optional[T]:
        # If the item exists, move it to the end of the OrderedDict.
        item = self._items.pop(item_id, None)
        if item is not None:
            self._items[item_id] = item
        return self._items.get(item_id)

    def set(self, item: T) -> None:
        item_id = getattr(item, self._id_field)
        if item_id in self._items:
            # If item already exists, remove it and add it to the end
            self._items.pop(item_id)
        elif len(self._items) >= self._max_items:
            # If cache is full, evict the least recently used item
            self._items.popitem(last=False)
        self._items[item_id] = item
        self._on_changed(item)

    def delete(self, item_id: str) -> None:
        # This is a no-op if the item doesn't exist.
        with suppress(KeyError):
            del self._items[item_id]
        self._on_deleted(item_id)
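A quick sketch of the LRU behavior, assuming a minimal pydantic model with an `id` field:

```python
from pydantic import BaseModel


class Doc(BaseModel):
    id: str
    text: str


storage = ItemStorageMemory[Doc](max_items=2)
storage.set(Doc(id="a", text="first"))
storage.set(Doc(id="b", text="second"))
storage.get("a")  # touching "a" makes it the most recently used item
storage.set(Doc(id="c", text="third"))  # evicts "b", the LRU item

assert storage.get("b") is None
assert storage.get("a") is not None
```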


@ -1,147 +0,0 @@
import sqlite3
import threading
from typing import Generic, Optional, TypeVar, get_args
from pydantic import BaseModel, TypeAdapter
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
from .item_storage_base import ItemStorageABC
T = TypeVar("T", bound=BaseModel)
class SqliteItemStorage(ItemStorageABC, Generic[T]):
_table_name: str
_conn: sqlite3.Connection
_cursor: sqlite3.Cursor
_id_field: str
_lock: threading.RLock
_validator: Optional[TypeAdapter[T]]
def __init__(self, db: SqliteDatabase, table_name: str, id_field: str = "id"):
super().__init__()
self._lock = db.lock
self._conn = db.conn
self._table_name = table_name
self._id_field = id_field # TODO: validate that T has this field
self._cursor = self._conn.cursor()
self._validator: Optional[TypeAdapter[T]] = None
self._create_table()
def _create_table(self):
try:
self._lock.acquire()
self._cursor.execute(
f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
item TEXT,
id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);"""
)
self._cursor.execute(
f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
)
finally:
self._lock.release()
def _parse_item(self, item: str) -> T:
if self._validator is None:
"""
We don't get access to `__orig_class__` in `__init__()`, and we need this before start(), so
we can create it when it is first needed instead.
__orig_class__ is technically an implementation detail of the typing module, not a supported API
"""
self._validator = TypeAdapter(get_args(self.__orig_class__)[0]) # type: ignore [attr-defined]
return self._validator.validate_json(item)
def set(self, item: T):
try:
self._lock.acquire()
self._cursor.execute(
f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
(item.model_dump_json(warnings=False, exclude_none=True),),
)
self._conn.commit()
finally:
self._lock.release()
self._on_changed(item)
def get(self, id: str) -> Optional[T]:
try:
self._lock.acquire()
self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
result = self._cursor.fetchone()
finally:
self._lock.release()
if not result:
return None
return self._parse_item(result[0])
def get_raw(self, id: str) -> Optional[str]:
try:
self._lock.acquire()
self._cursor.execute(f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),))
result = self._cursor.fetchone()
finally:
self._lock.release()
if not result:
return None
return result[0]
def delete(self, id: str):
try:
self._lock.acquire()
self._cursor.execute(f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),))
self._conn.commit()
finally:
self._lock.release()
self._on_deleted(id)
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
(per_page, page * per_page),
)
result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result]
self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
pageCount = int(count / per_page) + 1
return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)
def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
(f"%{query}%", per_page, page * per_page),
)
result = self._cursor.fetchall()
items = [self._parse_item(r[0]) for r in result]
self._cursor.execute(
f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",
(f"%{query}%",),
)
count = self._cursor.fetchone()[0]
finally:
self._lock.release()
pageCount = int(count / per_page) + 1
return PaginatedResults[T](items=items, page=page, pages=pageCount, per_page=per_page, total=count)

View File
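For reference, the storage pattern the deleted class relied on (a virtual column generated from the JSON payload, plus a unique index that makes INSERT OR REPLACE behave as an upsert keyed on that field) is easy to reproduce standalone. A minimal sketch, assuming SQLite 3.31+ for generated columns; the table and payload names are illustrative:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
# `id` is derived from the JSON blob; the unique index turns
# INSERT OR REPLACE into an upsert keyed on that extracted field.
cur.execute(
    """CREATE TABLE items (
        item TEXT,
        id TEXT GENERATED ALWAYS AS (json_extract(item, '$.id')) VIRTUAL NOT NULL);"""
)
cur.execute("CREATE UNIQUE INDEX items_id ON items(id);")
cur.execute("INSERT OR REPLACE INTO items (item) VALUES (?);", ('{"id": "a", "v": 1}',))
cur.execute("INSERT OR REPLACE INTO items (item) VALUES (?);", ('{"id": "a", "v": 2}',))
cur.execute("SELECT item FROM items WHERE id = ?;", ("a",))
print(cur.fetchone()[0])  # {"id": "a", "v": 2} -- the second write replaced the first
```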

@ -165,8 +165,8 @@ class ModelInstallJob(BaseModel):
)
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
bytes: Optional[int] = Field(
default=None, description="For a remote model, the number of bytes downloaded so far (may not be available)"
bytes: int = Field(
default=0, description="For a remote model, the number of bytes downloaded so far (may not be available)"
)
total_bytes: int = Field(default=0, description="Total size of the model to be installed")
source_metadata: Optional[AnyModelRepoMetadata] = Field(

View File
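Making `bytes` a plain int with a zero default (rather than Optional) removes the None-guard from progress arithmetic downstream. A minimal sketch of that simplification; the trimmed model and helper below are illustrative, not part of the codebase:

```python
from pydantic import BaseModel, Field

class JobProgress(BaseModel):
    bytes: int = Field(default=0, description="bytes downloaded so far")
    total_bytes: int = Field(default=0, description="total size of the model")

def percent_complete(job: JobProgress) -> float:
    # `bytes` is always an int now, so no `is not None` check is needed.
    return 100.0 * job.bytes / job.total_bytes if job.total_bytes else 0.0

print(percent_complete(JobProgress(bytes=512, total_bytes=2048)))  # 25.0
```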

@ -535,19 +535,19 @@ class ModelInstallService(ModelInstallServiceBase):
def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
# URLs from Civitai or HuggingFace will be handled specially
url_patterns = {
r"https?://civitai.com/": CivitaiMetadataFetch,
r"https?://huggingface.co/": HuggingFaceMetadataFetch,
r"^https?://civitai.com/": CivitaiMetadataFetch,
r"^https?://huggingface.co/[^/]+/[^/]+$": HuggingFaceMetadataFetch,
}
metadata = None
for pattern, fetcher in url_patterns.items():
if re.match(pattern, str(source.url), re.IGNORECASE):
metadata = fetcher(self._session).from_url(source.url)
break
self._logger.debug(f"metadata={metadata}")
if metadata and isinstance(metadata, ModelMetadataWithFiles):
remote_files = metadata.download_urls(session=self._session)
else:
remote_files = [RemoteModelFile(url=source.url, path=Path("."), size=0)]
return self._import_remote_model(
source=source,
config=config,
@ -586,6 +586,7 @@ class ModelInstallService(ModelInstallServiceBase):
assert install_job.total_bytes is not None # to avoid type checking complaints in the loop below
self._logger.info(f"Queuing {source} for downloading")
self._logger.debug(f"remote_files={remote_files}")
for model_file in remote_files:
url = model_file.url
path = model_file.path

View File
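The tightened HuggingFace pattern matches only bare `owner/repo` URLs, so deep links to individual files no longer trigger a repo-level metadata fetch (`re.match` is already start-anchored, which makes the added `^` belt-and-braces). A quick check of that behavior:

```python
import re

HF_PATTERN = r"^https?://huggingface.co/[^/]+/[^/]+$"

cases = {
    "https://huggingface.co/stabilityai/stable-diffusion-2-1": True,  # bare repo id
    "https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/unet/config.json": False,  # deep link
}
for url, expected in cases.items():
    assert bool(re.match(HF_PATTERN, url, re.IGNORECASE)) is expected
```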

@ -7,6 +7,7 @@ from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_5 import build_migration_5
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
@ -31,6 +32,7 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
migrator.register_migration(build_migration_3(app_config=config, logger=logger))
migrator.register_migration(build_migration_4())
migrator.register_migration(build_migration_5())
migrator.run_migrations()
return db

View File

@ -0,0 +1,34 @@
import sqlite3
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
class Migration5Callback:
def __call__(self, cursor: sqlite3.Cursor) -> None:
self._drop_graph_executions(cursor)
def _drop_graph_executions(self, cursor: sqlite3.Cursor) -> None:
"""Drops the `graph_executions` table."""
cursor.execute(
"""--sql
DROP TABLE IF EXISTS graph_executions;
"""
)
def build_migration_5() -> Migration:
"""
Build the migration from database version 4 to 5.
Introduced in v3.6.3, this migration:
- Drops the `graph_executions` table. We are able to do this because we are moving the graph storage
to be purely in-memory.
"""
migration_5 = Migration(
from_version=4,
to_version=5,
callback=Migration5Callback(),
)
return migration_5

View File

@ -72,7 +72,12 @@ class MigrateModelYamlToDb1:
continue
base_type, model_type, model_name = str(model_key).split("/")
hash = FastModelHash.hash(self.config.models_path / stanza.path)
try:
hash = FastModelHash.hash(self.config.models_path / stanza.path)
except OSError:
self.logger.warning(f"The model at {stanza.path} is not a valid file or directory. Skipping migration.")
continue
assert isinstance(model_key, str)
new_key = sha1(model_key.encode("utf-8")).hexdigest()

View File

@ -31,6 +31,7 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
class WorkflowCategory(str, Enum, metaclass=MetaEnum):
User = "user"
Default = "default"
Project = "project"
class WorkflowMeta(BaseModel):

View File

@ -0,0 +1,67 @@
import cProfile
from logging import Logger
from pathlib import Path
from typing import Optional
class Profiler:
"""
Simple wrapper around cProfile.
Usage
```
# Create a profiler
profiler = Profiler(logger, output_dir, "sql_query_perf")
# Start a new profile
profiler.start("my_profile")
# Do stuff
profiler.stop()
```
Visualize a profile as a flamegraph with [snakeviz](https://jiffyclub.github.io/snakeviz/)
```sh
snakeviz my_profile.prof
```
Visualize a profile as a directed graph with [graphviz](https://graphviz.org/download/) & [gprof2dot](https://github.com/jrfonseca/gprof2dot)
```sh
gprof2dot -f pstats my_profile.prof | dot -Tpng -o my_profile.png
# SVG or PDF may be nicer - you can search for function names
gprof2dot -f pstats my_profile.prof | dot -Tsvg -o my_profile.svg
gprof2dot -f pstats my_profile.prof | dot -Tpdf -o my_profile.pdf
```
"""
def __init__(self, logger: Logger, output_dir: Path, prefix: Optional[str] = None) -> None:
self._logger = logger.getChild(f"profiler.{prefix}" if prefix else "profiler")
self._output_dir = output_dir
self._output_dir.mkdir(parents=True, exist_ok=True)
self._profiler: Optional[cProfile.Profile] = None
self._prefix = prefix
self.profile_id: Optional[str] = None
def start(self, profile_id: str) -> None:
if self._profiler:
self.stop()
self.profile_id = profile_id
self._profiler = cProfile.Profile()
self._profiler.enable()
self._logger.info(f"Started profiling {self.profile_id}.")
def stop(self) -> Path:
if not self._profiler:
raise RuntimeError("Profiler not initialized. Call start() first.")
self._profiler.disable()
filename = f"{self._prefix}_{self.profile_id}.prof" if self._prefix else f"{self.profile_id}.prof"
path = Path(self._output_dir, filename)
self._profiler.dump_stats(path)
self._logger.info(f"Stopped profiling, profile dumped to {path}.")
self._profiler = None
self.profile_id = None
return path

View File
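A usage sketch for the new Profiler; the import path is an assumption (adjust to wherever the module actually lands in the tree):

```python
from logging import getLogger
from pathlib import Path

from invokeai.app.util.profiler import Profiler  # assumed import path

def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

profiler = Profiler(getLogger("perf"), Path("profiles"), prefix="demo")
profiler.start("fib")
fib(22)  # the work being measured
prof_path = profiler.stop()  # writes profiles/demo_fib.prof and returns the path
print(prof_path)
```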

@ -0,0 +1,109 @@
import pathlib
from typing import Literal, Union
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from einops import repeat
from PIL import Image
from torchvision.transforms import Compose
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.util import download_with_progress_bar
config = InvokeAIAppConfig.get_config()
DEPTH_ANYTHING_MODELS = {
"large": {
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
"local": "any/annotators/depth_anything/depth_anything_vitl14.pth",
},
"base": {
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitb14.pth?download=true",
"local": "any/annotators/depth_anything/depth_anything_vitb14.pth",
},
"small": {
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vits14.pth?download=true",
"local": "any/annotators/depth_anything/depth_anything_vits14.pth",
},
}
transform = Compose(
[
Resize(
width=518,
height=518,
resize_target=False,
keep_aspect_ratio=True,
ensure_multiple_of=14,
resize_method="lower_bound",
image_interpolation_method=cv2.INTER_CUBIC,
),
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
PrepareForNet(),
]
)
class DepthAnythingDetector:
def __init__(self) -> None:
self.model = None
self.model_size: Union[Literal["large", "base", "small"], None] = None
def load_model(self, model_size: Literal["large", "base", "small"]):
DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"])
if not DEPTH_ANYTHING_MODEL_PATH.exists():
download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH)
if not self.model or model_size != self.model_size:
del self.model
self.model_size = model_size
match self.model_size:
case "small":
self.model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
case "base":
self.model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
case "large":
self.model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
case _:
raise TypeError("Not a supported model")
self.model.load_state_dict(torch.load(DEPTH_ANYTHING_MODEL_PATH.as_posix(), map_location="cpu"))
self.model.eval()
self.model.to(choose_torch_device())
return self.model
def to(self, device):
self.model.to(device)
return self
def __call__(self, image, resolution=512, offload=False):
image = np.array(image, dtype=np.uint8)
image = image[:, :, ::-1] / 255.0
image_height, image_width = image.shape[:2]
image = transform({"image": image})["image"]
image = torch.from_numpy(image).unsqueeze(0).to(choose_torch_device())
with torch.no_grad():
depth = self.model(image)
depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0]
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
depth_map = repeat(depth, "h w -> h w 3").cpu().numpy().astype(np.uint8)
depth_map = Image.fromarray(depth_map)
new_height = int(image_height * (resolution / image_width))
depth_map = depth_map.resize((resolution, new_height))
if offload:
self.model = None  # drop the reference so the model can be freed; load_model() reloads it on next use
return depth_map

View File
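A usage sketch for the detector; the import path is inferred from this file's package, and the first call downloads the checkpoint into the configured models directory:

```python
from PIL import Image

from invokeai.backend.image_util.depth_anything import DepthAnythingDetector  # assumed path

detector = DepthAnythingDetector()
detector.load_model("small")  # fetches depth_anything_vits14.pth on first use
image = Image.open("photo.jpg").convert("RGB")
depth = detector(image, resolution=512)  # PIL image: normalized depth, 512px wide
depth.save("photo_depth.png")
```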

@ -0,0 +1,145 @@
import torch.nn as nn
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
scratch = nn.Module()
out_shape1 = out_shape
out_shape2 = out_shape
out_shape3 = out_shape
if len(in_shape) >= 4:
out_shape4 = out_shape
if expand:
out_shape1 = out_shape
out_shape2 = out_shape * 2
out_shape3 = out_shape * 4
if len(in_shape) >= 4:
out_shape4 = out_shape * 8
scratch.layer1_rn = nn.Conv2d(
in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
scratch.layer2_rn = nn.Conv2d(
in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
scratch.layer3_rn = nn.Conv2d(
in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
if len(in_shape) >= 4:
scratch.layer4_rn = nn.Conv2d(
in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
)
return scratch
class ResidualConvUnit(nn.Module):
"""Residual convolution module."""
def __init__(self, features, activation, bn):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.bn = bn
self.groups = 1
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
if self.bn:
self.bn1 = nn.BatchNorm2d(features)
self.bn2 = nn.BatchNorm2d(features)
self.activation = activation
self.skip_add = nn.quantized.FloatFunctional()
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.activation(x)
out = self.conv1(out)
if self.bn:
out = self.bn1(out)
out = self.activation(out)
out = self.conv2(out)
if self.bn:
out = self.bn2(out)
if self.groups > 1:
out = self.conv_merge(out)
return self.skip_add.add(out, x)
class FeatureFusionBlock(nn.Module):
"""Feature fusion block."""
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
"""Init.
Args:
features (int): number of features
"""
super(FeatureFusionBlock, self).__init__()
self.deconv = deconv
self.align_corners = align_corners
self.groups = 1
self.expand = expand
out_features = features
if self.expand:
out_features = features // 2
self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
self.skip_add = nn.quantized.FloatFunctional()
self.size = size
def forward(self, *xs, size=None):
"""Forward pass.
Returns:
tensor: output
"""
output = xs[0]
if len(xs) == 2:
res = self.resConfUnit1(xs[1])
output = self.skip_add.add(output, res)
output = self.resConfUnit2(output)
if (size is None) and (self.size is None):
modifier = {"scale_factor": 2}
elif size is None:
modifier = {"size": self.size}
else:
modifier = {"size": size}
output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners)
output = self.out_conv(output)
return output

View File
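A quick shape check for FeatureFusionBlock: with two inputs and no explicit size, the default modifier upsamples 2x, and out_conv keeps the channel count since expand=False:

```python
import torch
import torch.nn as nn

from invokeai.backend.image_util.depth_anything.model.blocks import FeatureFusionBlock

block = FeatureFusionBlock(features=64, activation=nn.ReLU(False), bn=False)
x = torch.randn(1, 64, 16, 16)     # coarser feature map
skip = torch.randn(1, 64, 16, 16)  # lateral skip connection
out = block(x, skip)
print(out.shape)  # torch.Size([1, 64, 32, 32])
```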

@ -0,0 +1,183 @@
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from .blocks import FeatureFusionBlock, _make_scratch
torchhub_path = Path(__file__).parent.parent / "torchhub"
def _make_fusion_block(features, use_bn, size=None):
return FeatureFusionBlock(
features,
nn.ReLU(False),
deconv=False,
bn=use_bn,
expand=False,
align_corners=True,
size=size,
)
class DPTHead(nn.Module):
def __init__(self, nclass, in_channels, features, out_channels, use_bn=False, use_clstoken=False):
super(DPTHead, self).__init__()
self.nclass = nclass
self.use_clstoken = use_clstoken
self.projects = nn.ModuleList(
[
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channel,
kernel_size=1,
stride=1,
padding=0,
)
for out_channel in out_channels
]
)
self.resize_layers = nn.ModuleList(
[
nn.ConvTranspose2d(
in_channels=out_channels[0], out_channels=out_channels[0], kernel_size=4, stride=4, padding=0
),
nn.ConvTranspose2d(
in_channels=out_channels[1], out_channels=out_channels[1], kernel_size=2, stride=2, padding=0
),
nn.Identity(),
nn.Conv2d(
in_channels=out_channels[3], out_channels=out_channels[3], kernel_size=3, stride=2, padding=1
),
]
)
if use_clstoken:
self.readout_projects = nn.ModuleList()
for _ in range(len(self.projects)):
self.readout_projects.append(nn.Sequential(nn.Linear(2 * in_channels, in_channels), nn.GELU()))
self.scratch = _make_scratch(
out_channels,
features,
groups=1,
expand=False,
)
self.scratch.stem_transpose = None
self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
head_features_1 = features
head_features_2 = 32
if nclass > 1:
self.scratch.output_conv = nn.Sequential(
nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1),
nn.ReLU(True),
nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0),
)
else:
self.scratch.output_conv1 = nn.Conv2d(
head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1
)
self.scratch.output_conv2 = nn.Sequential(
nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
nn.ReLU(True),
nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
nn.ReLU(True),
nn.Identity(),
)
def forward(self, out_features, patch_h, patch_w):
out = []
for i, x in enumerate(out_features):
if self.use_clstoken:
x, cls_token = x[0], x[1]
readout = cls_token.unsqueeze(1).expand_as(x)
x = self.readout_projects[i](torch.cat((x, readout), -1))
else:
x = x[0]
x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))
x = self.projects[i](x)
x = self.resize_layers[i](x)
out.append(x)
layer_1, layer_2, layer_3, layer_4 = out
layer_1_rn = self.scratch.layer1_rn(layer_1)
layer_2_rn = self.scratch.layer2_rn(layer_2)
layer_3_rn = self.scratch.layer3_rn(layer_3)
layer_4_rn = self.scratch.layer4_rn(layer_4)
path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
out = self.scratch.output_conv1(path_1)
out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
out = self.scratch.output_conv2(out)
return out
class DPT_DINOv2(nn.Module):
def __init__(
self,
features,
out_channels,
encoder="vitl",
use_bn=False,
use_clstoken=False,
):
super(DPT_DINOv2, self).__init__()
assert encoder in ["vits", "vitb", "vitl"]
# # in case the Internet connection is not stable, please load the DINOv2 locally
# if use_local:
# self.pretrained = torch.hub.load(
# torchhub_path / "facebookresearch_dinov2_main",
# "dinov2_{:}14".format(encoder),
# source="local",
# pretrained=False,
# )
# else:
# self.pretrained = torch.hub.load(
# "facebookresearch/dinov2",
# "dinov2_{:}14".format(encoder),
# )
self.pretrained = torch.hub.load(
"facebookresearch/dinov2",
"dinov2_{:}14".format(encoder),
)
dim = self.pretrained.blocks[0].attn.qkv.in_features
self.depth_head = DPTHead(1, dim, features, out_channels=out_channels, use_bn=use_bn, use_clstoken=use_clstoken)
def forward(self, x):
h, w = x.shape[-2:]
features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True)
patch_h, patch_w = h // 14, w // 14
depth = self.depth_head(features, patch_h, patch_w)
depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True)
depth = F.relu(depth)
return depth.squeeze(1)

View File
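The resolution bookkeeping in DPT_DINOv2.forward is worth spelling out: inputs arrive pre-constrained to multiples of the ViT patch size (14), the encoder yields an H/14 x W/14 token grid, the head interpolates to patch_h*14 x patch_w*14, and the final interpolate restores the exact input size:

```python
# Pure-arithmetic sketch of the shapes inside DPT_DINOv2.forward.
h, w = 518, 686                          # already multiples of 14 (see Resize below)
patch_h, patch_w = h // 14, w // 14      # 37 x 49 token grid fed to DPTHead
head_out = (patch_h * 14, patch_w * 14)  # (518, 686): the head's own upsample target
print(patch_h, patch_w, head_out)
```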

@ -0,0 +1,227 @@
import math
import cv2
import numpy as np
import torch
import torch.nn.functional as F
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size
"""
shape = list(sample["disparity"].shape)
if shape[0] >= size[0] and shape[1] >= size[1]:
return sample
scale = [0, 0]
scale[0] = size[0] / shape[0]
scale[1] = size[1] / shape[1]
scale = max(scale)
shape[0] = math.ceil(scale * shape[0])
shape[1] = math.ceil(scale * shape[1])
# resize
sample["image"] = cv2.resize(sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method)
sample["disparity"] = cv2.resize(sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST)
sample["mask"] = cv2.resize(
sample["mask"].astype(np.float32),
tuple(shape[::-1]),
interpolation=cv2.INTER_NEAREST,
)
sample["mask"] = sample["mask"].astype(bool)
return tuple(shape)
class Resize(object):
"""Resize sample to given size (width, height)."""
def __init__(
self,
width,
height,
resize_target=True,
keep_aspect_ratio=False,
ensure_multiple_of=1,
resize_method="lower_bound",
image_interpolation_method=cv2.INTER_AREA,
):
"""Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height is constrained to be multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller
than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
"""
self.__width = width
self.__height = height
self.__resize_target = resize_target
self.__keep_aspect_ratio = keep_aspect_ratio
self.__multiple_of = ensure_multiple_of
self.__resize_method = resize_method
self.__image_interpolation_method = image_interpolation_method
def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
if max_val is not None and y > max_val:
y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
if y < min_val:
y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
return y
def get_size(self, width, height):
# determine new height and width
scale_height = self.__height / height
scale_width = self.__width / width
if self.__keep_aspect_ratio:
if self.__resize_method == "lower_bound":
# scale such that output size is lower bound
if scale_width > scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
elif self.__resize_method == "upper_bound":
# scale such that output size is upper bound
if scale_width < scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
elif self.__resize_method == "minimal":
# scale as little as possible
if abs(1 - scale_width) < abs(1 - scale_height):
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
else:
raise ValueError(f"resize_method {self.__resize_method} not implemented")
if self.__resize_method == "lower_bound":
new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
elif self.__resize_method == "upper_bound":
new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
elif self.__resize_method == "minimal":
new_height = self.constrain_to_multiple_of(scale_height * height)
new_width = self.constrain_to_multiple_of(scale_width * width)
else:
raise ValueError(f"resize_method {self.__resize_method} not implemented")
return (new_width, new_height)
def __call__(self, sample):
width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])
# resize sample
sample["image"] = cv2.resize(
sample["image"],
(width, height),
interpolation=self.__image_interpolation_method,
)
if self.__resize_target:
if "disparity" in sample:
sample["disparity"] = cv2.resize(
sample["disparity"],
(width, height),
interpolation=cv2.INTER_NEAREST,
)
if "depth" in sample:
sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)
if "semseg_mask" in sample:
# sample["semseg_mask"] = cv2.resize(
# sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
# )
sample["semseg_mask"] = F.interpolate(
torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode="nearest"
).numpy()[0, 0]
if "mask" in sample:
sample["mask"] = cv2.resize(
sample["mask"].astype(np.float32),
(width, height),
interpolation=cv2.INTER_NEAREST,
)
# sample["mask"] = sample["mask"].astype(bool)
# print(sample['image'].shape, sample['depth'].shape)
return sample
class NormalizeImage(object):
"""Normlize image by given mean and std."""
def __init__(self, mean, std):
self.__mean = mean
self.__std = std
def __call__(self, sample):
sample["image"] = (sample["image"] - self.__mean) / self.__std
return sample
class PrepareForNet(object):
"""Prepare sample for usage as network input."""
def __init__(self):
pass
def __call__(self, sample):
image = np.transpose(sample["image"], (2, 0, 1))
sample["image"] = np.ascontiguousarray(image).astype(np.float32)
if "mask" in sample:
sample["mask"] = sample["mask"].astype(np.float32)
sample["mask"] = np.ascontiguousarray(sample["mask"])
if "depth" in sample:
depth = sample["depth"].astype(np.float32)
sample["depth"] = np.ascontiguousarray(depth)
if "semseg_mask" in sample:
sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])
return sample

View File
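Running the pipeline this module feeds (see the Compose at the top of the detector file) on a dummy 480x640 image shows the lower_bound / multiple-of-14 behavior:

```python
import cv2
import numpy as np

from invokeai.backend.image_util.depth_anything.utilities.util import (
    NormalizeImage,
    PrepareForNet,
    Resize,
)

resize = Resize(
    width=518, height=518, resize_target=False, keep_aspect_ratio=True,
    ensure_multiple_of=14, resize_method="lower_bound",
    image_interpolation_method=cv2.INTER_CUBIC,
)
sample = {"image": np.random.rand(480, 640, 3).astype(np.float32)}  # values in [0, 1]
sample = resize(sample)
sample = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(sample)
sample = PrepareForNet()(sample)
print(sample["image"].shape)  # (3, 518, 686): both sides >= 518 and multiples of 14
```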

@ -0,0 +1,281 @@
"""Utility (backend) functions used by model_install.py"""
import re
from logging import Logger
from pathlib import Path
from typing import Any, Dict, List, Optional
import omegaconf
from huggingface_hub import HfFolder
from pydantic import BaseModel, Field
from pydantic.dataclasses import dataclass
from pydantic.networks import AnyHttpUrl
from requests import HTTPError
from tqdm import tqdm
import invokeai.configs as configs
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadQueueService
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.image_files.image_files_disk import DiskImageFileStorage
from invokeai.app.services.model_install import (
HFModelSource,
LocalModelSource,
ModelInstallService,
ModelInstallServiceBase,
ModelSource,
URLModelSource,
)
from invokeai.app.services.model_records import ModelRecordServiceBase, ModelRecordServiceSQL
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
from invokeai.backend.model_manager import (
BaseModelType,
InvalidModelConfigException,
ModelType,
)
from invokeai.backend.model_manager.metadata import UnknownMetadataException
from invokeai.backend.util.logging import InvokeAILogger
# name of the starter models file
INITIAL_MODELS = "INITIAL_MODELS2.yaml"
def initialize_record_store(app_config: InvokeAIAppConfig) -> ModelRecordServiceBase:
"""Return an initialized ModelConfigRecordServiceBase object."""
logger = InvokeAILogger.get_logger(config=app_config)
image_files = DiskImageFileStorage(f"{app_config.output_path}/images")
db = init_db(config=app_config, logger=logger, image_files=image_files)
obj: ModelRecordServiceBase = ModelRecordServiceSQL(db)
return obj
def initialize_installer(
app_config: InvokeAIAppConfig, event_bus: Optional[EventServiceBase] = None
) -> ModelInstallServiceBase:
"""Return an initialized ModelInstallService object."""
record_store = initialize_record_store(app_config)
metadata_store = record_store.metadata_store
download_queue = DownloadQueueService()
installer = ModelInstallService(
app_config=app_config,
record_store=record_store,
metadata_store=metadata_store,
download_queue=download_queue,
event_bus=event_bus,
)
download_queue.start()
installer.start()
return installer
class UnifiedModelInfo(BaseModel):
"""Catchall class for information in INITIAL_MODELS2.yaml."""
name: Optional[str] = None
base: Optional[BaseModelType] = None
type: Optional[ModelType] = None
source: Optional[str] = None
subfolder: Optional[str] = None
description: Optional[str] = None
recommended: bool = False
installed: bool = False
default: bool = False
requires: List[str] = Field(default_factory=list)
@dataclass
class InstallSelections:
"""Lists of models to install and remove."""
install_models: List[UnifiedModelInfo] = Field(default_factory=list)
remove_models: List[str] = Field(default_factory=list)
class TqdmEventService(EventServiceBase):
"""An event service to track downloads."""
def __init__(self) -> None:
"""Create a new TqdmEventService object."""
super().__init__()
self._bars: Dict[str, tqdm] = {}
self._last: Dict[str, int] = {}
def dispatch(self, event_name: str, payload: Any) -> None:
"""Dispatch an event by appending it to self.events."""
if payload["event"] == "model_install_downloading":
data = payload["data"]
dest = data["local_path"]
total_bytes = data["total_bytes"]
bytes = data["bytes"]
if dest not in self._bars:
self._bars[dest] = tqdm(desc=Path(dest).name, initial=0, total=total_bytes, unit="iB", unit_scale=True)
self._last[dest] = 0
self._bars[dest].update(bytes - self._last[dest])
self._last[dest] = bytes
class InstallHelper(object):
"""Capture information stored jointly in INITIAL_MODELS.yaml and the installed models db."""
def __init__(self, app_config: InvokeAIAppConfig, logger: Logger):
"""Create new InstallHelper object."""
self._app_config = app_config
self.all_models: Dict[str, UnifiedModelInfo] = {}
omega = omegaconf.OmegaConf.load(Path(configs.__path__[0]) / INITIAL_MODELS)
assert isinstance(omega, omegaconf.dictconfig.DictConfig)
self._installer = initialize_installer(app_config, TqdmEventService())
self._initial_models = omega
self._installed_models: List[str] = []
self._starter_models: List[str] = []
self._default_model: Optional[str] = None
self._logger = logger
self._initialize_model_lists()
@property
def installer(self) -> ModelInstallServiceBase:
"""Return the installer object used internally."""
return self._installer
def _initialize_model_lists(self) -> None:
"""
Initialize our model slots.
Set up the following:
installed_models -- list of installed model keys
starter_models -- list of starter model keys from INITIAL_MODELS
all_models -- dict of key => UnifiedModelInfo
default_model -- key to default model
"""
# previously-installed models
for model in self._installer.record_store.all_models():
info = UnifiedModelInfo.parse_obj(model.dict())
info.installed = True
model_key = f"{model.base.value}/{model.type.value}/{model.name}"
self.all_models[model_key] = info
self._installed_models.append(model_key)
for key in self._initial_models.keys():
assert isinstance(key, str)
if key in self.all_models:
# we want to preserve the description
description = self.all_models[key].description or self._initial_models[key].get("description")
self.all_models[key].description = description
else:
base_model, model_type, model_name = key.split("/")
info = UnifiedModelInfo(
name=model_name,
type=ModelType(model_type),
base=BaseModelType(base_model),
source=self._initial_models[key].source,
description=self._initial_models[key].get("description"),
recommended=self._initial_models[key].get("recommended", False),
default=self._initial_models[key].get("default", False),
subfolder=self._initial_models[key].get("subfolder"),
requires=list(self._initial_models[key].get("requires", [])),
)
self.all_models[key] = info
if not self.default_model():
self._default_model = key
elif self._initial_models[key].get("default", False):
self._default_model = key
self._starter_models.append(key)
def recommended_models(self) -> List[UnifiedModelInfo]:
"""List of the models recommended in INITIAL_MODELS.yaml."""
return [self._to_model(x) for x in self._starter_models if self._to_model(x).recommended]
def installed_models(self) -> List[UnifiedModelInfo]:
"""List of models already installed."""
return [self._to_model(x) for x in self._installed_models]
def starter_models(self) -> List[UnifiedModelInfo]:
"""List of starter models."""
return [self._to_model(x) for x in self._starter_models]
def default_model(self) -> Optional[UnifiedModelInfo]:
"""Return the default model."""
return self._to_model(self._default_model) if self._default_model else None
def _to_model(self, key: str) -> UnifiedModelInfo:
return self.all_models[key]
def _add_required_models(self, model_list: List[UnifiedModelInfo]) -> None:
installed = {x.source for x in self.installed_models()}
reverse_source = {x.source: x for x in self.all_models.values()}
additional_models: List[UnifiedModelInfo] = []
for model_info in model_list:
for requirement in model_info.requires:
if requirement not in installed and reverse_source.get(requirement):
additional_models.append(reverse_source[requirement])
model_list.extend(additional_models)
def _make_install_source(self, model_info: UnifiedModelInfo) -> ModelSource:
assert model_info.source
model_path_id_or_url = model_info.source.strip("\"' ")
model_path = Path(model_path_id_or_url)
if model_path.exists(): # local file on disk
return LocalModelSource(path=model_path.absolute(), inplace=True)
if re.match(r"^[^/]+/[^/]+$", model_path_id_or_url): # hugging face repo_id
return HFModelSource(
repo_id=model_path_id_or_url,
access_token=HfFolder.get_token(),
subfolder=model_info.subfolder,
)
if re.match(r"^(http|https):", model_path_id_or_url):
return URLModelSource(url=AnyHttpUrl(model_path_id_or_url))
raise ValueError(f"Unsupported model source: {model_path_id_or_url}")
def add_or_delete(self, selections: InstallSelections) -> None:
"""Add or delete selected models."""
installer = self._installer
self._add_required_models(selections.install_models)
for model in selections.install_models:
source = self._make_install_source(model)
config = (
{
"description": model.description,
"name": model.name,
}
if model.name
else None
)
try:
installer.import_model(
source=source,
config=config,
)
except (UnknownMetadataException, InvalidModelConfigException, HTTPError, OSError) as e:
self._logger.warning(f"{source}: {e}")
for model_to_remove in selections.remove_models:
parts = model_to_remove.split("/")
if len(parts) == 1:
base_model, model_type, model_name = (None, None, model_to_remove)
else:
base_model, model_type, model_name = parts
matches = installer.record_store.search_by_attr(
base_model=BaseModelType(base_model) if base_model else None,
model_type=ModelType(model_type) if model_type else None,
model_name=model_name,
)
if len(matches) > 1:
print(f"{model} is ambiguous. Please use model_type:model_name (e.g. main:my_model) to disambiguate.")
elif not matches:
print(f"{model}: unknown model")
else:
for m in matches:
print(f"Deleting {m.type}:{m.name}")
installer.delete(m.key)
installer.wait_for_installs()

View File
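The classification order in _make_install_source (local path, then HuggingFace repo_id, then URL) reads as a three-way fallthrough; a standalone sketch of the same checks:

```python
import re
from pathlib import Path

def classify(source: str) -> str:
    source = source.strip("\"' ")
    if Path(source).exists():
        return "local path"
    if re.match(r"^[^/]+/[^/]+$", source):  # owner/repo with no other slashes
        return "huggingface repo_id"
    if re.match(r"^(http|https):", source):
        return "url"
    raise ValueError(f"Unsupported model source: {source}")

print(classify("runwayml/stable-diffusion-v1-5"))                 # huggingface repo_id
print(classify("https://civitai.com/api/download/models/83390"))  # url
```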

@ -849,7 +849,7 @@ def migrate_if_needed(opt: Namespace, root: Path) -> bool:
# -------------------------------------
def main():
def main() -> None:
parser = argparse.ArgumentParser(description="InvokeAI model downloader")
parser.add_argument(
"--skip-sd-weights",

View File

@ -104,12 +104,14 @@ class ModelInstall(object):
prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
model_manager: Optional[ModelManager] = None,
access_token: Optional[str] = None,
civitai_api_key: Optional[str] = None,
):
self.config = config
self.mgr = model_manager or ModelManager(config.model_conf_path)
self.datasets = OmegaConf.load(Dataset_path)
self.prediction_helper = prediction_type_helper
self.access_token = access_token or HfFolder.get_token()
self.civitai_api_key = civitai_api_key or config.civitai_api_key
self.reverse_paths = self._reverse_paths(self.datasets)
def all_models(self) -> Dict[str, ModelLoadInfo]:
@ -326,7 +328,11 @@ class ModelInstall(object):
def _install_url(self, url: str) -> AddModelResult:
with TemporaryDirectory(dir=self.config.models_path) as staging:
location = download_with_resume(url, Path(staging))
CIVITAI_RE = r".*civitai.com.*"
civit_url = re.match(CIVITAI_RE, url, re.IGNORECASE)
location = download_with_resume(
url, Path(staging), access_token=self.civitai_api_key if civit_url else None
)
if not location:
logger.error(f"Unable to download {url}. Skipping.")
info = ModelProbe().heuristic_probe(location, self.prediction_helper)

View File

@ -141,7 +141,7 @@ class StableDiffusionXLModel(DiffusersModel):
version=base_model,
model_config=config,
output_path=output_path,
use_safetensors=False, # corrupts sdxl models for some reason
use_safetensors=True,
**kwargs,
)
else:

View File

@ -0,0 +1,177 @@
"""
invokeai.backend.model_manager.merge exports:
merge_diffusion_models() -- combine multiple models by location and return a pipeline object
merge_diffusion_models_and_commit() -- combine multiple models by ModelManager ID and write to models.yaml
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
"""
import warnings
from enum import Enum
from pathlib import Path
from typing import Any, List, Optional, Set
import torch
from diffusers import AutoPipelineForText2Image
from diffusers import logging as dlogging
from invokeai.app.services.model_install import ModelInstallServiceBase
from invokeai.backend.util.devices import choose_torch_device, torch_dtype
from . import (
AnyModelConfig,
BaseModelType,
ModelType,
ModelVariantType,
)
from .config import MainDiffusersConfig
class MergeInterpolationMethod(str, Enum):
WeightedSum = "weighted_sum"
Sigmoid = "sigmoid"
InvSigmoid = "inv_sigmoid"
AddDifference = "add_difference"
class ModelMerger(object):
"""Wrapper class for model merge function."""
def __init__(self, installer: ModelInstallServiceBase):
"""
Initialize a ModelMerger object.
:param store: Underlying storage manager for the running process.
:param config: InvokeAIAppConfig object (if not provided, default will be selected).
"""
self._installer = installer
def merge_diffusion_models(
self,
model_paths: List[Path],
alpha: float = 0.5,
interp: Optional[MergeInterpolationMethod] = None,
force: bool = False,
variant: Optional[str] = None,
**kwargs: Any,
) -> Any: # pipe.merge is an untyped function.
"""
:param model_paths: up to three models, designated by their local paths or HuggingFace repo_ids
:param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
:param interp: The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_difference" and None.
Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported.
:param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
**kwargs - the default DiffusionPipeline.get_config_dict kwargs:
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
verbosity = dlogging.get_verbosity()
dlogging.set_verbosity_error()
dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
# Note that checkpoint_merger will not work with downloaded HuggingFace fp16 models
# until upstream https://github.com/huggingface/diffusers/pull/6670 is merged and released.
pipe = AutoPipelineForText2Image.from_pretrained(
model_paths[0],
custom_pipeline="checkpoint_merger",
torch_dtype=dtype,
variant=variant,
)
merged_pipe = pipe.merge(
pretrained_model_name_or_path_list=model_paths,
alpha=alpha,
interp=interp.value if interp else None, # diffusers API treats None as "weighted sum"
force=force,
torch_dtype=dtype,
variant=variant,
**kwargs,
)
dlogging.set_verbosity(verbosity)
return merged_pipe
def merge_diffusion_models_and_save(
self,
model_keys: List[str],
merged_model_name: str,
alpha: float = 0.5,
force: bool = False,
interp: Optional[MergeInterpolationMethod] = None,
merge_dest_directory: Optional[Path] = None,
variant: Optional[str] = None,
**kwargs: Any,
) -> AnyModelConfig:
"""
:param model_keys: up to three models, designated by their InvokeAI model record keys
:param merged_model_name: name for new model
:param alpha: The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
:param interp: The interpolation method to use for the merging. Supports "weighted_average", "sigmoid", "inv_sigmoid", "add_difference" and None.
Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_difference" is supported. Add_difference is A+(B-C).
:param force: Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
:param merge_dest_directory: Save the merged model to the designated directory (with 'merged_model_name' appended)
**kwargs - the default DiffusionPipeline.get_config_dict kwargs:
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map
"""
model_paths: List[Path] = []
model_names: List[str] = []
config = self._installer.app_config
store = self._installer.record_store
base_models: Set[BaseModelType] = set()
vae = None
variant = None if self._installer.app_config.full_precision else "fp16"
assert (
len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
), "When merging three models, only the 'add_difference' merge method is supported"
for key in model_keys:
info = store.get_model(key)
model_names.append(info.name)
assert isinstance(
info, MainDiffusersConfig
), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
assert info.variant == ModelVariantType(
"normal"
), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
# pick up the first model's vae
if key == model_keys[0]:
vae = info.vae
# tally base models used
base_models.add(info.base)
model_paths.extend([config.models_path / info.path])
assert len(base_models) == 1, f"All models to merge must have same base model, but found bases {base_models}"
base_model = base_models.pop()
merge_method = None if interp == "weighted_sum" else MergeInterpolationMethod(interp)
merged_pipe = self.merge_diffusion_models(model_paths, alpha, merge_method, force, variant=variant, **kwargs)
dump_path = (
Path(merge_dest_directory)
if merge_dest_directory
else config.models_path / base_model.value / ModelType.Main.value
)
dump_path.mkdir(parents=True, exist_ok=True)
dump_path = dump_path / merged_model_name
dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
merged_pipe.save_pretrained(dump_path.as_posix(), safe_serialization=True, torch_dtype=dtype, variant=variant)
# register model and get its unique key
key = self._installer.register_path(dump_path)
# update model's config
model_config = self._installer.record_store.get_model(key)
model_config.update(
{
"name": merged_model_name,
"description": f"Merge of models {', '.join(model_names)}",
"vae": vae,
}
)
self._installer.record_store.update_model(key, model_config)
return model_config

View File
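A hedged usage sketch of the merge API; `installer` stands for a started ModelInstallServiceBase, and the keys are placeholders for two installed diffusers models on the same base:

```python
from invokeai.backend.model_manager.merge import MergeInterpolationMethod, ModelMerger

merger = ModelMerger(installer)  # `installer` assumed to exist and be started
merged_config = merger.merge_diffusion_models_and_save(
    model_keys=["key-of-model-a", "key-of-model-b"],  # hypothetical record keys
    merged_model_name="my-merged-model",
    alpha=0.5,  # equal weighting of the two checkpoints
    interp=MergeInterpolationMethod.Sigmoid,
)
print(merged_config.name)
```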

@ -170,6 +170,8 @@ class CivitaiMetadataFetch(ModelMetadataFetchBase):
if model_id is None:
version_url = CIVITAI_VERSION_ENDPOINT + str(version_id)
version = self._requests.get(version_url).json()
if error := version.get("error"):
raise UnknownMetadataException(error)
model_id = version["modelId"]
model_url = CIVITAI_MODEL_ENDPOINT + str(model_id)

View File

@ -12,7 +12,7 @@ import psutil
import torch
from compel.cross_attention_control import Arguments
from diffusers.models.attention_processor import Attention, AttentionProcessor, AttnProcessor, SlicedAttnProcessor
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from torch import nn
import invokeai.backend.util.logging as logger

View File

@ -11,6 +11,7 @@ import logging
import math
import os
import random
from argparse import Namespace
from pathlib import Path
from typing import Optional
@ -30,8 +31,6 @@ from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import HfFolder, Repository, whoami
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
@ -41,8 +40,8 @@ from transformers import CLIPTextModel, CLIPTokenizer
# invokeai stuff
from invokeai.app.services.config import InvokeAIAppConfig, PagingArgumentParser
from invokeai.app.services.model_manager import ModelManagerService
from invokeai.backend.model_management.models import SubModelType
from invokeai.backend.install.install_helper import initialize_record_store
from invokeai.backend.model_manager import BaseModelType, ModelType
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
@ -77,7 +76,7 @@ def save_progress(text_encoder, placeholder_token_id, accelerator, placeholder_t
torch.save(learned_embeds_dict, save_path)
def parse_args():
def parse_args() -> Namespace:
config = InvokeAIAppConfig.get_config()
parser = PagingArgumentParser(description="Textual inversion training")
general_group = parser.add_argument_group("General")
@ -444,7 +443,7 @@ class TextualInversionDataset(Dataset):
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
def __len__(self) -> int:
return self._length
def __getitem__(self, i):
@ -509,11 +508,10 @@ def do_textual_inversion_training(
initializer_token: str,
save_steps: int = 500,
only_save_embeds: bool = False,
revision: str = None,
tokenizer_name: str = None,
tokenizer_name: Optional[str] = None,
learnable_property: str = "object",
repeats: int = 100,
seed: int = None,
seed: Optional[int] = None,
resolution: int = 512,
center_crop: bool = False,
train_batch_size: int = 16,
@ -530,18 +528,18 @@ def do_textual_inversion_training(
adam_weight_decay: float = 1e-02,
adam_epsilon: float = 1e-08,
push_to_hub: bool = False,
hub_token: str = None,
hub_token: Optional[str] = None,
logging_dir: Path = Path("logs"),
mixed_precision: str = "fp16",
allow_tf32: bool = False,
report_to: str = "tensorboard",
local_rank: int = -1,
checkpointing_steps: int = 500,
resume_from_checkpoint: Path = None,
resume_from_checkpoint: Optional[Path] = None,
enable_xformers_memory_efficient_attention: bool = False,
hub_model_id: str = None,
hub_model_id: Optional[str] = None,
**kwargs,
):
) -> None:
assert model, "Please specify a base model with --model"
assert train_data_dir, "Please specify a directory containing the training images using --train_data_dir"
assert placeholder_token, "Please specify a trigger term using --placeholder_token"
@ -564,8 +562,6 @@ def do_textual_inversion_training(
project_config=accelerator_config,
)
model_manager = ModelManagerService(config, logger)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@ -603,44 +599,37 @@ def do_textual_inversion_training(
elif output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
known_models = model_manager.model_names()
model_name = model.split("/")[-1]
model_meta = next((mm for mm in known_models if mm[0].endswith(model_name)), None)
assert model_meta is not None, f"Unknown model: {model}"
model_info = model_manager.model_info(*model_meta)
assert model_info["model_format"] == "diffusers", "This script only works with models of type 'diffusers'"
tokenizer_info = model_manager.get_model(*model_meta, submodel=SubModelType.Tokenizer)
noise_scheduler_info = model_manager.get_model(*model_meta, submodel=SubModelType.Scheduler)
text_encoder_info = model_manager.get_model(*model_meta, submodel=SubModelType.TextEncoder)
vae_info = model_manager.get_model(*model_meta, submodel=SubModelType.Vae)
unet_info = model_manager.get_model(*model_meta, submodel=SubModelType.UNet)
model_records = initialize_record_store(config)
base, type, name = model.split("/") # note frontend still returns old-style keys
try:
model_config = model_records.search_by_attr(
model_name=name, model_type=ModelType(type), base_model=BaseModelType(base)
)[0]
except IndexError:
raise Exception(f"Unknown model {model}")
model_path = config.models_path / model_config.path
pipeline_args = {"local_files_only": True}
if tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_name, **pipeline_args)
else:
tokenizer = CLIPTokenizer.from_pretrained(tokenizer_info.location, subfolder="tokenizer", **pipeline_args)
tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer", **pipeline_args)
# Load scheduler and models
noise_scheduler = DDPMScheduler.from_pretrained(
noise_scheduler_info.location, subfolder="scheduler", **pipeline_args
)
noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder="scheduler", **pipeline_args)
text_encoder = CLIPTextModel.from_pretrained(
text_encoder_info.location,
model_path,
subfolder="text_encoder",
revision=revision,
**pipeline_args,
)
vae = AutoencoderKL.from_pretrained(
vae_info.location,
model_path,
subfolder="vae",
revision=revision,
**pipeline_args,
)
unet = UNet2DConditionModel.from_pretrained(
unet_info.location,
model_path,
subfolder="unet",
revision=revision,
**pipeline_args,
)
@ -728,7 +717,7 @@ def do_textual_inversion_training(
max_train_steps = num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
scheduler = get_scheduler(
lr_scheduler,
optimizer=optimizer,
num_warmup_steps=lr_warmup_steps * gradient_accumulation_steps,
@ -737,7 +726,7 @@ def do_textual_inversion_training(
# Prepare everything with our `accelerator`.
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
text_encoder, optimizer, train_dataloader, lr_scheduler
text_encoder, optimizer, train_dataloader, scheduler
)
# For mixed precision training we cast the unet and vae weights to half-precision
@ -863,7 +852,7 @@ def do_textual_inversion_training(
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
scheduler.step()
optimizer.zero_grad()
# Let's make sure we don't update any embedding weights besides the newly added token
@ -893,7 +882,7 @@ def do_textual_inversion_training(
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
logs = {"loss": loss.detach().item(), "lr": scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
@ -910,7 +899,7 @@ def do_textual_inversion_training(
save_full_model = not only_save_embeds
if save_full_model:
pipeline = StableDiffusionPipeline.from_pretrained(
unet_info.location,
model_path,
text_encoder=accelerator.unwrap_model(text_encoder),
vae=vae,
unet=unet,

View File
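With the record store resolving a single model_path, every submodel now loads from a subfolder of that one directory. A minimal sketch of the pattern (the path below is hypothetical):

```python
from pathlib import Path

from diffusers import DDPMScheduler
from transformers import CLIPTokenizer

model_path = Path("/invokeai/models/sd-1/main/stable-diffusion-v1-5")  # hypothetical
pipeline_args = {"local_files_only": True}  # never hit the network during training
tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer", **pipeline_args)
noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder="scheduler", **pipeline_args)
```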

@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import diffusers
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import FromOriginalControlnetMixin
from diffusers.loaders import FromOriginalControlNetMixin
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.controlnet import ControlNetConditioningEmbedding, ControlNetOutput, zero_module
from diffusers.models.embeddings import (
@ -14,8 +14,13 @@ from diffusers.models.embeddings import (
Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, DownBlock2D, UNetMidBlock2DCrossAttn, get_down_block
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.unets.unet_2d_blocks import (
CrossAttnDownBlock2D,
DownBlock2D,
UNetMidBlock2DCrossAttn,
get_down_block,
)
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
from torch import nn
from invokeai.backend.util.logging import InvokeAILogger
@ -27,7 +32,7 @@ from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.get_logger(__name__)
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlNetMixin):
"""
A ControlNet model.

View File

@ -286,7 +286,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
open_mode = "wb"
exist_size = 0
resp = requests.get(url, header, stream=True)
resp = requests.get(url, headers=header, stream=True, allow_redirects=True)
content_length = int(resp.headers.get("content-length", 0))
if dest.is_dir():

View File
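This one-line fix is subtle: in `requests.get(url, header, stream=True)`, the second positional parameter of requests.get is `params`, so the dict was serialized into the query string and no Authorization header was ever sent. The corrected call names `headers=` and follows redirects explicitly:

```python
import requests

header = {"Authorization": "Bearer TOKEN"}  # illustrative token

# Buggy: `header` binds to `params`, becoming ?Authorization=Bearer+TOKEN
# resp = requests.get(url, header, stream=True)

# Fixed: the dict is sent as real HTTP headers, and redirects
# (e.g. Civitai's download links) are followed.
resp = requests.get("https://example.com/file.bin", headers=header,
                    stream=True, allow_redirects=True)
```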

@ -0,0 +1,157 @@
# This file predefines a few models that the user may want to install.
sd-1/main/stable-diffusion-v1-5:
description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
source: runwayml/stable-diffusion-v1-5
recommended: True
default: True
sd-1/main/stable-diffusion-v1-5-inpainting:
description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
source: runwayml/stable-diffusion-inpainting
recommended: True
sd-2/main/stable-diffusion-2-1:
description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
source: stabilityai/stable-diffusion-2-1
recommended: False
sd-2/main/stable-diffusion-2-inpainting:
description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
source: stabilityai/stable-diffusion-2-inpainting
recommended: False
sdxl/main/stable-diffusion-xl-base-1-0:
description: Stable Diffusion XL base model (12 GB)
source: stabilityai/stable-diffusion-xl-base-1.0
recommended: True
sdxl-refiner/main/stable-diffusion-xl-refiner-1-0:
description: Stable Diffusion XL refiner model (12 GB)
source: stabilityai/stable-diffusion-xl-refiner-1.0
recommended: False
sdxl/vae/sdxl-vae-fp16-fix:
description: Version of the SDXL-1.0 VAE that works in half precision mode
source: madebyollin/sdxl-vae-fp16-fix
recommended: True
sd-1/main/Analog-Diffusion:
description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
source: wavymulder/Analog-Diffusion
recommended: False
sd-1/main/Deliberate:
description: Versatile model that produces detailed images up to 768px (4.27 GB)
source: XpucT/Deliberate
recommended: False
sd-1/main/Dungeons-and-Diffusion:
description: Dungeons & Dragons characters (2.13 GB)
source: 0xJustin/Dungeons-and-Diffusion
recommended: False
sd-1/main/dreamlike-photoreal-2:
description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
source: dreamlike-art/dreamlike-photoreal-2.0
recommended: False
sd-1/main/Inkpunk-Diffusion:
description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
source: Envvi/Inkpunk-Diffusion
recommended: False
sd-1/main/openjourney:
description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
source: prompthero/openjourney
recommended: False
sd-1/main/seek.art_MEGA:
source: coreco/seek.art_MEGA
description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
recommended: False
sd-1/main/trinart_stable_diffusion_v2:
description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
source: naclbit/trinart_stable_diffusion_v2
recommended: False
sd-1/controlnet/qrcode_monster:
source: monster-labs/control_v1p_sd15_qrcode_monster
subfolder: v2
sd-1/controlnet/canny:
source: lllyasviel/control_v11p_sd15_canny
recommended: True
sd-1/controlnet/inpaint:
source: lllyasviel/control_v11p_sd15_inpaint
sd-1/controlnet/mlsd:
source: lllyasviel/control_v11p_sd15_mlsd
sd-1/controlnet/depth:
source: lllyasviel/control_v11f1p_sd15_depth
recommended: True
sd-1/controlnet/normal_bae:
source: lllyasviel/control_v11p_sd15_normalbae
sd-1/controlnet/seg:
source: lllyasviel/control_v11p_sd15_seg
sd-1/controlnet/lineart:
source: lllyasviel/control_v11p_sd15_lineart
recommended: True
sd-1/controlnet/lineart_anime:
source: lllyasviel/control_v11p_sd15s2_lineart_anime
sd-1/controlnet/openpose:
source: lllyasviel/control_v11p_sd15_openpose
recommended: True
sd-1/controlnet/scribble:
source: lllyasviel/control_v11p_sd15_scribble
recommended: False
sd-1/controlnet/softedge:
source: lllyasviel/control_v11p_sd15_softedge
sd-1/controlnet/shuffle:
source: lllyasviel/control_v11e_sd15_shuffle
sd-1/controlnet/tile:
source: lllyasviel/control_v11f1e_sd15_tile
sd-1/controlnet/ip2p:
source: lllyasviel/control_v11e_sd15_ip2p
sd-1/t2i_adapter/canny-sd15:
source: TencentARC/t2iadapter_canny_sd15v2
sd-1/t2i_adapter/sketch-sd15:
source: TencentARC/t2iadapter_sketch_sd15v2
sd-1/t2i_adapter/depth-sd15:
source: TencentARC/t2iadapter_depth_sd15v2
sd-1/t2i_adapter/zoedepth-sd15:
source: TencentARC/t2iadapter_zoedepth_sd15v1
sdxl/t2i_adapter/canny-sdxl:
source: TencentARC/t2i-adapter-canny-sdxl-1.0
sdxl/t2i_adapter/zoedepth-sdxl:
source: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0
sdxl/t2i_adapter/lineart-sdxl:
source: TencentARC/t2i-adapter-lineart-sdxl-1.0
sdxl/t2i_adapter/sketch-sdxl:
source: TencentARC/t2i-adapter-sketch-sdxl-1.0
sd-1/embedding/EasyNegative:
source: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
recommended: True
description: A textual inversion to use in the negative prompt to reduce bad anatomy
sd-1/lora/FlatColor:
source: https://civitai.com/models/6433/loraflatcolor
recommended: True
description: A LoRA that generates scenery using solid blocks of color
sd-1/lora/Ink scenery:
source: https://civitai.com/api/download/models/83390
description: Generates India ink-like landscapes
sd-1/ip_adapter/ip_adapter_sd15:
source: InvokeAI/ip_adapter_sd15
recommended: True
requires:
- InvokeAI/ip_adapter_sd_image_encoder
description: IP-Adapter for SD 1.5 models
sd-1/ip_adapter/ip_adapter_plus_sd15:
source: InvokeAI/ip_adapter_plus_sd15
recommended: False
requires:
- InvokeAI/ip_adapter_sd_image_encoder
description: Refined IP-Adapter for SD 1.5 models
sd-1/ip_adapter/ip_adapter_plus_face_sd15:
source: InvokeAI/ip_adapter_plus_face_sd15
recommended: False
requires:
- InvokeAI/ip_adapter_sd_image_encoder
description: Refined IP-Adapter for SD 1.5 models, adapted for faces
sdxl/ip_adapter/ip_adapter_sdxl:
source: InvokeAI/ip_adapter_sdxl
recommended: False
requires:
- InvokeAI/ip_adapter_sdxl_image_encoder
description: IP-Adapter for SDXL models
any/clip_vision/ip_adapter_sd_image_encoder:
source: InvokeAI/ip_adapter_sd_image_encoder
recommended: False
description: Required model for using IP-Adapters with SD-1/2 models
any/clip_vision/ip_adapter_sdxl_image_encoder:
source: InvokeAI/ip_adapter_sdxl_image_encoder
recommended: False
description: Required model for using IP-Adapters with SDXL models
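
Each record above follows one schema: a base/type/name key plus a source (HuggingFace repo_id or direct URL), an optional description, an optional recommended flag, an optional subfolder, and, for the IP-Adapters, a requires list naming the image encoder that must be installed alongside. A minimal sketch of consuming that schema, assuming the list is ordinary YAML parsed with PyYAML (the file name below is hypothetical):

    import yaml  # assumes PyYAML is installed

    with open("initial_models.yaml") as f:  # hypothetical file name
        records = yaml.safe_load(f)

    # Map every record flagged as recommended to its download source.
    recommended = {key: rec["source"] for key, rec in records.items() if rec.get("recommended", False)}
    # e.g. "sd-1/controlnet/canny" -> "lllyasviel/control_v11p_sd15_canny"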

View File

@@ -2,3 +2,5 @@
Wrapper for invokeai.backend.configure.invokeai_configure
"""
from ...backend.install.invokeai_configure import main as invokeai_configure # noqa: F401
__all__ = ["invokeai_configure"]

View File

@@ -5,14 +5,14 @@ pip install <path_to_git_source>.
import os
import platform
from distutils.version import LooseVersion
from importlib.metadata import PackageNotFoundError, distribution, distributions
import pkg_resources
import psutil
import requests
from rich import box, print
from rich.console import Console, group
from rich.panel import Panel
from rich.prompt import Prompt
from rich.prompt import Confirm, Prompt
from rich.style import Style
from invokeai.version import __version__
@@ -61,6 +61,65 @@ def get_pypi_versions():
return latest_version, latest_release_candidate, versions
def get_torch_extra_index_url() -> str | None:
"""
Determine torch wheel source URL and optional modules based on the user's OS.
"""
resolved_url = None
# In all other cases (like macOS (MPS) or Linux+CUDA), there is no need to specify the extra index URL.
torch_package_urls = {
"windows_cuda": "https://download.pytorch.org/whl/cu121",
"linux_rocm": "https://download.pytorch.org/whl/rocm5.6",
"linux_cpu": "https://download.pytorch.org/whl/cpu",
}
nvidia_packages_present = (
len([d.metadata["Name"] for d in distributions() if d.metadata["Name"].startswith("nvidia")]) > 0
)
device = "cuda" if nvidia_packages_present else None
manual_gpu_selection_prompt = (
"[bold]We tried and failed to guess your GPU capabilities[/] :thinking_face:. Please select the GPU type:"
)
if OS == "Linux":
if not device:
# do we even need to offer a CPU-only install option?
print(manual_gpu_selection_prompt)
print("1: NVIDIA (CUDA)")
print("2: AMD (ROCm)")
print("3: No GPU - CPU only")
answer = Prompt.ask("Choice:", choices=["1", "2", "3"], default="1")
match answer:
case "1":
device = "cuda"
case "2":
device = "rocm"
case "3":
device = "cpu"
if device != "cuda":
resolved_url = torch_package_urls[f"linux_{device}"]
if OS == "Windows":
if not device:
print(manual_gpu_selection_prompt)
print("1: NVIDIA (CUDA)")
print("2: No GPU - CPU only")
answer = Prompt.ask("Your choice:", choices=["1", "2"], default="1")
match answer:
case "1":
device = "cuda"
case "2":
device = "cpu"
if device == "cuda":
resolved_url = torch_package_urls[f"windows_{device}"]
return resolved_url
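
A minimal usage sketch for the function above (it returns None whenever the default PyPI index suffices, e.g. macOS or Linux with CUDA); this mirrors the flag assembly that main() performs further below:

    # Sketch only: build the pip flags from the resolved index URL.
    flags = []
    if (index_url := get_torch_extra_index_url()) is not None:
        flags.append(f"--extra-index-url {index_url}")
    print("pip install invokeai --use-pep517 --upgrade " + " ".join(flags))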
def welcome(latest_release: str, latest_prerelease: str):
@group()
def text():
@@ -89,12 +148,11 @@ def welcome(latest_release: str, latest_prerelease: str):
def get_extras():
extras = ""
try:
_ = pkg_resources.get_distribution("xformers")
distribution("xformers")
extras = "[xformers]"
except pkg_resources.DistributionNotFound:
pass
except PackageNotFoundError:
extras = ""
return extras
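
The hunk above swaps the deprecated pkg_resources API for importlib.metadata. The detection pattern, restated as a self-contained sketch:

    from importlib.metadata import PackageNotFoundError, distribution

    def has_distribution(name: str) -> bool:
        """Return True if a distribution with this name is installed."""
        try:
            distribution(name)
            return True
        except PackageNotFoundError:
            return False

    extras = "[xformers]" if has_distribution("xformers") else ""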
@@ -125,8 +183,22 @@ def main():
extras = get_extras()
console.line()
force_reinstall = Confirm.ask(
"[bold]Force reinstallation of all dependencies?[/] This [i]may[/] help fix a broken upgrade, but is usually not necessary.",
default=False,
)
console.line()
flags = []
if (index_url := get_torch_extra_index_url()) is not None:
flags.append(f"--extra-index-url {index_url}")
if force_reinstall:
flags.append("--force-reinstall")
flags = " ".join(flags)
print(f":crossed_fingers: Upgrading to [yellow]{release}[/yellow]")
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade'
cmd = f'pip install "invokeai{extras}=={release}" --use-pep517 --upgrade {flags}'
print("")
print("")

View File

@@ -0,0 +1,645 @@
#!/usr/bin/env python
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
# Before running stable-diffusion on an internet-isolated machine,
# run this script from one with internet connectivity. The
# two machines must share a common .cache directory.
"""
This is the npyscreen frontend to the model installation application.
It is currently named model_install2.py, but will ultimately replace model_install.py.
"""
import argparse
import curses
import sys
import traceback
import warnings
from argparse import Namespace
from shutil import get_terminal_size
from typing import Any, Dict, List, Optional, Set
import npyscreen
import torch
from npyscreen import widget
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_install import ModelInstallService
from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo
from invokeai.backend.model_manager import ModelType
from invokeai.backend.util import choose_precision, choose_torch_device
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.install.widgets import (
MIN_COLS,
MIN_LINES,
CenteredTitleText,
CyclingForm,
MultiSelectColumns,
SingleSelectColumns,
TextBox,
WindowTooSmallException,
set_min_terminal_size,
)
warnings.filterwarnings("ignore", category=UserWarning) # noqa: E402
config = InvokeAIAppConfig.get_config()
logger = InvokeAILogger.get_logger("ModelInstallService")
logger.setLevel("WARNING")
# logger.setLevel('DEBUG')
# build a table mapping all non-printable characters to None
# for stripping control characters
# from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()}
# maximum number of installed models we can display before overflowing vertically
MAX_OTHER_MODELS = 72
def make_printable(s: str) -> str:
"""Replace non-printable characters in a string."""
return s.translate(NOPRINT_TRANS_TABLE)
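
For example, control characters are dropped while printable text passes through unchanged:

    # Quick checks of the translation table above: NUL and ESC are stripped.
    assert make_printable("ab\x00c\x1b[31m") == "abc[31m"
    assert make_printable("plain text") == "plain text"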
class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
"""Main form for interactive TUI."""
# for responsive resizing set to False, but this seems to cause a crash!
FIX_MINIMUM_SIZE_WHEN_CREATED = True
# for persistence
current_tab = 0
def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, multipage: bool = False, **keywords: Any):
self.multipage = multipage
self.subprocess = None
super().__init__(parentApp=parentApp, name=name, **keywords)
def create(self) -> None:
self.installer = self.parentApp.install_helper.installer
self.model_labels = self._get_model_labels()
self.keypress_timeout = 10
self.counter = 0
self.subprocess_connection = None
window_width, window_height = get_terminal_size()
# npyscreen has no typing hints
self.nextrely -= 1 # type: ignore
self.add_widget_intelligent(
npyscreen.FixedText,
value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields. Cursor keys navigate, and <space> selects.",
editable=False,
color="CAUTION",
)
self.nextrely += 1 # type: ignore
self.tabs = self.add_widget_intelligent(
SingleSelectColumns,
values=[
"STARTERS",
"MAINS",
"CONTROLNETS",
"T2I-ADAPTERS",
"IP-ADAPTERS",
"LORAS",
"TI EMBEDDINGS",
],
value=[self.current_tab],
columns=7,
max_height=2,
relx=8,
scroll_exit=True,
)
self.tabs.on_changed = self._toggle_tables
top_of_table = self.nextrely # type: ignore
self.starter_pipelines = self.add_starter_pipelines()
bottom_of_table = self.nextrely # type: ignore
self.nextrely = top_of_table
self.pipeline_models = self.add_pipeline_widgets(
model_type=ModelType.Main, window_width=window_width, exclude=self.starter_models
)
# self.pipeline_models['autoload_pending'] = True
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.controlnet_models = self.add_model_widgets(
model_type=ModelType.ControlNet,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.t2i_models = self.add_model_widgets(
model_type=ModelType.T2IAdapter,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.ipadapter_models = self.add_model_widgets(
model_type=ModelType.IPAdapter,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.lora_models = self.add_model_widgets(
model_type=ModelType.Lora,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = top_of_table
self.ti_models = self.add_model_widgets(
model_type=ModelType.TextualInversion,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table, self.nextrely)
self.nextrely = bottom_of_table + 1
self.nextrely += 1
back_label = "BACK"
cancel_label = "CANCEL"
current_position = self.nextrely
if self.multipage:
self.back_button = self.add_widget_intelligent(
npyscreen.ButtonPress,
name=back_label,
when_pressed_function=self.on_back,
)
else:
self.nextrely = current_position
self.cancel_button = self.add_widget_intelligent(
npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel
)
self.nextrely = current_position
label = "APPLY CHANGES"
self.nextrely = current_position
self.done = self.add_widget_intelligent(
npyscreen.ButtonPress,
name=label,
relx=window_width - len(label) - 15,
when_pressed_function=self.on_done,
)
# This restores the selected page on return from an installation
for _i in range(1, self.current_tab + 1):
self.tabs.h_cursor_line_down(1)
self._toggle_tables([self.current_tab])
############# diffusers tab ##########
def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
"""Add widgets responsible for selecting diffusers models"""
widgets: Dict[str, npyscreen.widget] = {}
all_models = self.all_models # master dict of all models, indexed by key
model_list = [x for x in self.starter_models if all_models[x].type in ["main", "vae"]]
model_labels = [self.model_labels[x] for x in model_list]
widgets.update(
label1=self.add_widget_intelligent(
CenteredTitleText,
name="Select from a starter set of Stable Diffusion models from HuggingFace and Civitae.",
editable=False,
labelColor="CAUTION",
)
)
self.nextrely -= 1
# if user has already installed some initial models, then don't patronize them
# by showing more recommendations
show_recommended = len(self.installed_models) == 0
checked = [
model_list.index(x)
for x in model_list
if (show_recommended and all_models[x].recommended) or all_models[x].installed
]
widgets.update(
models_selected=self.add_widget_intelligent(
MultiSelectColumns,
columns=1,
name="Install Starter Models",
values=model_labels,
value=checked,
max_height=len(model_list) + 1,
relx=4,
scroll_exit=True,
),
models=model_list,
)
self.nextrely += 1
return widgets
############# Add a set of model install widgets ########
def add_model_widgets(
self,
model_type: ModelType,
window_width: int = 120,
install_prompt: Optional[str] = None,
exclude: Optional[Set[str]] = None,
) -> dict[str, npyscreen.widget]:
"""Generic code to create model selection widgets"""
if exclude is None:
exclude = set()
widgets: Dict[str, npyscreen.widget] = {}
all_models = self.all_models
model_list = sorted(
[x for x in all_models if all_models[x].type == model_type and x not in exclude],
key=lambda x: all_models[x].name or "",
)
model_labels = [self.model_labels[x] for x in model_list]
show_recommended = len(self.installed_models) == 0
truncated = False
if len(model_list) > 0:
max_width = max([len(x) for x in model_labels])
columns = window_width // (max_width + 8) # 8 characters for "[x] " and padding
columns = min(len(model_list), columns) or 1
prompt = (
install_prompt
or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk."
)
widgets.update(
label1=self.add_widget_intelligent(
CenteredTitleText,
name=prompt,
editable=False,
labelColor="CAUTION",
)
)
if len(model_labels) > MAX_OTHER_MODELS:
model_labels = model_labels[0:MAX_OTHER_MODELS]
truncated = True
widgets.update(
models_selected=self.add_widget_intelligent(
MultiSelectColumns,
columns=columns,
name=f"Install {model_type} Models",
values=model_labels,
value=[
model_list.index(x)
for x in model_list
if (show_recommended and all_models[x].recommended) or all_models[x].installed
],
max_height=len(model_list) // columns + 1,
relx=4,
scroll_exit=True,
),
models=model_list,
)
if truncated:
widgets.update(
warning_message=self.add_widget_intelligent(
npyscreen.FixedText,
value=f"Too many models to display (max={MAX_OTHER_MODELS}). Some are not displayed.",
editable=False,
color="CAUTION",
)
)
self.nextrely += 1
widgets.update(
download_ids=self.add_widget_intelligent(
TextBox,
name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):",
max_height=6,
scroll_exit=True,
editable=True,
)
)
return widgets
### Tab for arbitrary diffusers widgets ###
def add_pipeline_widgets(
self,
model_type: ModelType = ModelType.Main,
window_width: int = 120,
**kwargs,
) -> dict[str, npyscreen.widget]:
"""Similar to add_model_widgets() but adds some additional widgets at the bottom
to support the autoload directory"""
widgets = self.add_model_widgets(
model_type=model_type,
window_width=window_width,
install_prompt=f"Installed {model_type.value.title()} models. Unchecked models in the InvokeAI root directory will be deleted. Enter URLs, paths or repo_ids to import.",
**kwargs,
)
return widgets
def resize(self) -> None:
super().resize()
if s := self.starter_pipelines.get("models_selected"):
if model_list := self.starter_pipelines.get("models"):
s.values = [self.model_labels[x] for x in model_list]
def _toggle_tables(self, value: List[int]) -> None:
selected_tab = value[0]
widgets = [
self.starter_pipelines,
self.pipeline_models,
self.controlnet_models,
self.t2i_models,
self.ipadapter_models,
self.lora_models,
self.ti_models,
]
for group in widgets:
for _k, v in group.items():
try:
v.hidden = True
v.editable = False
except Exception:
pass
for _k, v in widgets[selected_tab].items():
try:
v.hidden = False
if not isinstance(v, (npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
v.editable = True
except Exception:
pass
self.__class__.current_tab = selected_tab # for persistence
self.display()
def _get_model_labels(self) -> dict[str, str]:
"""Return a list of trimmed labels for all models."""
window_width, window_height = get_terminal_size()
checkbox_width = 4
spacing_width = 2
result = {}
models = self.all_models
label_width = max([len(models[x].name or "") for x in self.starter_models])
description_width = window_width - label_width - checkbox_width - spacing_width
for key in self.all_models:
description = models[key].description
description = (
description[0 : description_width - 3] + "..."
if description and len(description) > description_width
else description
if description
else ""
)
result[key] = f"%-{label_width}s %s" % (models[key].name, description)
return result
def _get_columns(self) -> int:
window_width, window_height = get_terminal_size()
cols = 4 if window_width > 240 else 3 if window_width > 160 else 2 if window_width > 80 else 1
return min(cols, len(self.installed_models))
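
Restated as a standalone sketch, the breakpoints above are 80, 160 and 240 terminal columns, capped by the number of installed models:

    def columns_for_width(window_width: int, installed: int) -> int:
        # Same logic as _get_columns() above, extracted for illustration.
        cols = 4 if window_width > 240 else 3 if window_width > 160 else 2 if window_width > 80 else 1
        return min(cols, installed)

    assert columns_for_width(200, 10) == 3  # wide terminal -> three columns
    assert columns_for_width(75, 10) == 1   # narrow terminal -> one column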
def confirm_deletions(self, selections: InstallSelections) -> bool:
remove_models = selections.remove_models
if remove_models:
model_names = [self.all_models[x].name or "" for x in remove_models]
mods = "\n".join(model_names)
is_ok = npyscreen.notify_ok_cancel(
f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}"
)
assert isinstance(is_ok, bool) # npyscreen doesn't have return type annotations
return is_ok
else:
return True
@property
def all_models(self) -> Dict[str, UnifiedModelInfo]:
# npyscreen doesn't have typing hints
return self.parentApp.install_helper.all_models # type: ignore
@property
def starter_models(self) -> List[str]:
return self.parentApp.install_helper._starter_models # type: ignore
@property
def installed_models(self) -> List[str]:
return self.parentApp.install_helper._installed_models # type: ignore
def on_back(self) -> None:
self.parentApp.switchFormPrevious()
self.editing = False
def on_cancel(self) -> None:
self.parentApp.setNextForm(None)
self.parentApp.user_cancelled = True
self.editing = False
def on_done(self) -> None:
self.marshall_arguments()
if not self.confirm_deletions(self.parentApp.install_selections):
return
self.parentApp.setNextForm(None)
self.parentApp.user_cancelled = False
self.editing = False
def marshall_arguments(self) -> None:
"""
Assemble arguments and store as attributes of the application:
.starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml
True => Install
False => Remove
.scan_directory: Path to a directory of models to scan and import
.autoscan_on_startup: True if invokeai should scan and import at startup time
.import_model_paths: list of URLs, repo_ids and file paths to import
"""
selections = self.parentApp.install_selections
all_models = self.all_models
# Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove
ui_sections = [
self.starter_pipelines,
self.pipeline_models,
self.controlnet_models,
self.t2i_models,
self.ipadapter_models,
self.lora_models,
self.ti_models,
]
for section in ui_sections:
if "models_selected" not in section:
continue
selected = {section["models"][x] for x in section["models_selected"].value}
models_to_install = [x for x in selected if not self.all_models[x].installed]
models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
selections.remove_models.extend(models_to_remove)
selections.install_models.extend([all_models[x] for x in models_to_install])
# models located in the 'download_ids" section
for section in ui_sections:
if downloads := section.get("download_ids"):
models = [UnifiedModelInfo(source=x) for x in downloads.value.split()]
selections.install_models.extend(models)
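
The two loops above reduce to set arithmetic between what the user checked and what is already on disk; a sketch with keys borrowed from the starter list:

    selected = {"sd-1/main/Deliberate", "sd-1/lora/FlatColor"}    # checked in the UI
    installed = {"sd-1/lora/FlatColor", "sd-1/main/openjourney"}  # already installed
    to_install = selected - installed  # {"sd-1/main/Deliberate"}
    to_remove = installed - selected   # {"sd-1/main/openjourney"}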
class AddModelApplication(npyscreen.NPSAppManaged): # type: ignore
def __init__(self, opt: Namespace, install_helper: InstallHelper):
super().__init__()
self.program_opts = opt
self.user_cancelled = False
self.install_selections = InstallSelections()
self.install_helper = install_helper
def onStart(self) -> None:
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
self.main_form = self.addForm(
"MAIN",
addModelsForm,
name="Install Stable Diffusion Models",
cycle_widgets=False,
)
def list_models(installer: ModelInstallService, model_type: ModelType):
"""Print out all models of type model_type."""
models = installer.record_store.search_by_attr(model_type=model_type)
print(f"Installed models of type `{model_type}`:")
for model in models:
path = (config.models_path / model.path).resolve()
print(f"{model.name:40}{model.base.value:14}{path}")
# --------------------------------------------------------
def select_and_download_models(opt: Namespace) -> None:
"""Prompt user for install/delete selections and execute."""
precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
# unsure how to avoid a typing complaint in the next line: config.precision is an enumerated Literal
config.precision = precision # type: ignore
install_helper = InstallHelper(config, logger)
installer = install_helper.installer
if opt.list_models:
list_models(installer, opt.list_models)
elif opt.add or opt.delete:
selections = InstallSelections(
install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or []
)
install_helper.add_or_delete(selections)
elif opt.default_only:
selections = InstallSelections(install_models=[install_helper.default_model()])
install_helper.add_or_delete(selections)
elif opt.yes_to_all:
selections = InstallSelections(install_models=install_helper.recommended_models())
install_helper.add_or_delete(selections)
# this is where the TUI is called
else:
if not set_min_terminal_size(MIN_COLS, MIN_LINES):
raise WindowTooSmallException(
"Could not increase terminal size. Try running again with a larger window or smaller font size."
)
installApp = AddModelApplication(opt, install_helper)
try:
installApp.run()
except KeyboardInterrupt:
print("Aborted...")
sys.exit(-1)
install_helper.add_or_delete(installApp.install_selections)
# -------------------------------------
def main() -> None:
parser = argparse.ArgumentParser(description="InvokeAI model downloader")
parser.add_argument(
"--add",
nargs="*",
help="List of URLs, local paths or repo_ids of models to install",
)
parser.add_argument(
"--delete",
nargs="*",
help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`",
)
parser.add_argument(
"--full-precision",
dest="full_precision",
action=argparse.BooleanOptionalAction,
type=bool,
default=False,
help="use 32-bit weights instead of faster 16-bit weights",
)
parser.add_argument(
"--yes",
"-y",
dest="yes_to_all",
action="store_true",
help='answer "yes" to all prompts',
)
parser.add_argument(
"--default_only",
action="store_true",
help="Only install the default model",
)
parser.add_argument(
"--list-models",
choices=[x.value for x in ModelType],
help="list installed models",
)
parser.add_argument(
"--root_dir",
dest="root",
type=str,
default=None,
help="path to root of install directory",
)
opt = parser.parse_args()
invoke_args = []
if opt.root:
invoke_args.extend(["--root", opt.root])
if opt.full_precision:
invoke_args.extend(["--precision", "float32"])
config.parse_args(invoke_args)
logger = InvokeAILogger.get_logger(config=config)
if not config.model_conf_path.exists():
logger.info("Your InvokeAI root directory is not set up. Calling invokeai-configure.")
from invokeai.frontend.install.invokeai_configure import invokeai_configure
invokeai_configure()
sys.exit(0)
try:
select_and_download_models(opt)
except AssertionError as e:
logger.error(e)
sys.exit(-1)
except KeyboardInterrupt:
curses.nocbreak()
curses.echo()
curses.endwin()
logger.info("Goodbye! Come back soon.")
except WindowTooSmallException as e:
logger.error(str(e))
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
logger.error("Insufficient vertical space for the interface. Please make your window taller and try again")
input("Press any key to continue...")
except Exception as e:
if str(e).startswith("addwstr"):
logger.error(
"Insufficient horizontal space for the interface. Please make your window wider and try again."
)
else:
print(f"An exception has occurred: {str(e)} Details:")
print(traceback.format_exc(), file=sys.stderr)
input("Press any key to continue...")
# -------------------------------------
if __name__ == "__main__":
main()
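
Hedged usage sketches for the parser defined in main(); the console-script name below is an assumption, and --list-models takes the ModelType enum values:

    invokeai-model-install2 --add wavymulder/Analog-Diffusion https://civitai.com/api/download/models/83390
    invokeai-model-install2 --delete controlnet:qrcode_monster
    invokeai-model-install2 --list-models lora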

View File

@@ -0,0 +1,438 @@
"""
invokeai.frontend.merge exports a single function called merge_diffusion_models().
It merges two or three models and creates a new InvokeAI-registered diffusion model.
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
"""
import argparse
import curses
import re
import sys
from argparse import Namespace
from pathlib import Path
from typing import List, Optional, Tuple
import npyscreen
from npyscreen import widget
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_install import ModelInstallServiceBase
from invokeai.app.services.model_records import ModelRecordServiceBase
from invokeai.backend.install.install_helper import initialize_installer
from invokeai.backend.model_manager import (
BaseModelType,
ModelFormat,
ModelType,
ModelVariantType,
)
from invokeai.backend.model_manager.merge import ModelMerger
from invokeai.frontend.install.widgets import FloatTitleSlider, SingleSelectColumns, TextBox
config = InvokeAIAppConfig.get_config()
BASE_TYPES = [
(BaseModelType.StableDiffusion1, "Models Built on SD-1.x"),
(BaseModelType.StableDiffusion2, "Models Built on SD-2.x"),
(BaseModelType.StableDiffusionXL, "Models Built on SDXL"),
]
def _parse_args() -> Namespace:
parser = argparse.ArgumentParser(description="InvokeAI model merging")
parser.add_argument(
"--root_dir",
type=Path,
default=config.root,
help="Path to the invokeai runtime directory",
)
parser.add_argument(
"--front_end",
"--gui",
dest="front_end",
action="store_true",
default=False,
help="Activate the text-based graphical front end for collecting parameters. Aside from --root_dir, other parameters will be ignored.",
)
parser.add_argument(
"--models",
dest="model_names",
type=str,
nargs="+",
help="Two to three model names to be merged",
)
parser.add_argument(
"--base_model",
type=str,
choices=[x[0].value for x in BASE_TYPES],
help="The base model shared by the models to be merged",
)
parser.add_argument(
"--merged_model_name",
"--destination",
dest="merged_model_name",
type=str,
help="Name of the output model. If not specified, will be the concatenation of the input model names.",
)
parser.add_argument(
"--alpha",
type=float,
default=0.5,
help="The interpolation parameter, ranging from 0 to 1. It affects the ratio in which the checkpoints are merged. Higher values give more weight to the 2d and 3d models",
)
parser.add_argument(
"--interpolation",
dest="interp",
type=str,
choices=["weighted_sum", "sigmoid", "inv_sigmoid", "add_difference"],
default="weighted_sum",
help='Interpolation method to use. If three models are present, only "add_difference" will work.',
)
parser.add_argument(
"--force",
action="store_true",
help="Try to merge models even if they are incompatible with each other",
)
parser.add_argument(
"--clobber",
"--overwrite",
dest="clobber",
action="store_true",
help="Overwrite the merged model if --merged_model_name already exists",
)
return parser.parse_args()
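
For orientation: weighted_sum conventionally blends two models as (1 - alpha) * A + alpha * B, and add_difference combines three as A + (B - C) scaled by alpha (the GUI below labels the method "add_difference ( A+(B-C) )"), which is why higher alpha values weight the later models more heavily. A hedged example invocation; the console-script name and the base-model identifier are assumptions, while the flags are exactly those defined above:

    invokeai-merge --models ModelA ModelB --base_model sd-1 \
        --interpolation weighted_sum --alpha 0.35 --merged_model_name my-blend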
# ------------------------- GUI HERE -------------------------
class mergeModelsForm(npyscreen.FormMultiPageAction):
interpolations = ["weighted_sum", "sigmoid", "inv_sigmoid"]
def __init__(self, parentApp, name):
self.parentApp = parentApp
self.ALLOW_RESIZE = True
self.FIX_MINIMUM_SIZE_WHEN_CREATED = False
super().__init__(parentApp, name)
@property
def model_record_store(self) -> ModelRecordServiceBase:
installer: ModelInstallServiceBase = self.parentApp.installer
return installer.record_store
def afterEditing(self) -> None:
self.parentApp.setNextForm(None)
def create(self) -> None:
window_height, window_width = curses.initscr().getmaxyx()
self.current_base = 0
self.models = self.get_models(BASE_TYPES[self.current_base][0])
self.model_names = [x[1] for x in self.models]
max_width = max([len(x) for x in self.model_names])
max_width += 6
horizontal_layout = max_width * 3 < window_width
self.add_widget_intelligent(
npyscreen.FixedText,
color="CONTROL",
value="Select two models to merge and optionally a third.",
editable=False,
)
self.add_widget_intelligent(
npyscreen.FixedText,
color="CONTROL",
value="Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
editable=False,
)
self.nextrely += 1
self.base_select = self.add_widget_intelligent(
SingleSelectColumns,
values=[x[1] for x in BASE_TYPES],
value=[self.current_base],
columns=4,
max_height=2,
relx=8,
scroll_exit=True,
)
self.base_select.on_changed = self._populate_models
self.add_widget_intelligent(
npyscreen.FixedText,
value="MODEL 1",
color="GOOD",
editable=False,
rely=6 if horizontal_layout else None,
)
self.model1 = self.add_widget_intelligent(
npyscreen.SelectOne,
values=self.model_names,
value=0,
max_height=len(self.model_names),
max_width=max_width,
scroll_exit=True,
rely=7,
)
self.add_widget_intelligent(
npyscreen.FixedText,
value="MODEL 2",
color="GOOD",
editable=False,
relx=max_width + 3 if horizontal_layout else None,
rely=6 if horizontal_layout else None,
)
self.model2 = self.add_widget_intelligent(
npyscreen.SelectOne,
name="(2)",
values=self.model_names,
value=1,
max_height=len(self.model_names),
max_width=max_width,
relx=max_width + 3 if horizontal_layout else None,
rely=7 if horizontal_layout else None,
scroll_exit=True,
)
self.add_widget_intelligent(
npyscreen.FixedText,
value="MODEL 3",
color="GOOD",
editable=False,
relx=max_width * 2 + 3 if horizontal_layout else None,
rely=6 if horizontal_layout else None,
)
models_plus_none = self.model_names.copy()
models_plus_none.insert(0, "None")
self.model3 = self.add_widget_intelligent(
npyscreen.SelectOne,
name="(3)",
values=models_plus_none,
value=0,
max_height=len(self.model_names) + 1,
max_width=max_width,
scroll_exit=True,
relx=max_width * 2 + 3 if horizontal_layout else None,
rely=7 if horizontal_layout else None,
)
for m in [self.model1, self.model2, self.model3]:
m.when_value_edited = self.models_changed
self.merged_model_name = self.add_widget_intelligent(
TextBox,
name="Name for merged model:",
labelColor="CONTROL",
max_height=3,
value="",
scroll_exit=True,
)
self.force = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Force merge of models created by different diffusers library versions",
labelColor="CONTROL",
value=True,
scroll_exit=True,
)
self.nextrely += 1
self.merge_method = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Merge Method:",
values=self.interpolations,
value=0,
labelColor="CONTROL",
max_height=len(self.interpolations) + 1,
scroll_exit=True,
)
self.alpha = self.add_widget_intelligent(
FloatTitleSlider,
name="Weight (alpha) to assign to second and third models:",
out_of=1.0,
step=0.01,
lowest=0,
value=0.5,
labelColor="CONTROL",
scroll_exit=True,
)
self.model1.editing = True
def models_changed(self) -> None:
models = self.model1.values
selected_model1 = self.model1.value[0]
selected_model2 = self.model2.value[0]
selected_model3 = self.model3.value[0]
merged_model_name = f"{models[selected_model1]}+{models[selected_model2]}"
self.merged_model_name.value = merged_model_name
if selected_model3 > 0:
self.merge_method.values = ["add_difference ( A+(B-C) )"]
self.merged_model_name.value += f"+{models[selected_model3 -1]}" # In model3 there is one more element in the list (None). So we have to subtract one.
else:
self.merge_method.values = self.interpolations
self.merge_method.value = 0
def on_ok(self) -> None:
if self.validate_field_values() and self.check_for_overwrite():
self.parentApp.setNextForm(None)
self.editing = False
self.parentApp.merge_arguments = self.marshall_arguments()
npyscreen.notify("Starting the merge...")
else:
self.editing = True
def on_cancel(self) -> None:
sys.exit(0)
def marshall_arguments(self) -> dict:
model_keys = [x[0] for x in self.models]
models = [
model_keys[self.model1.value[0]],
model_keys[self.model2.value[0]],
]
if self.model3.value[0] > 0:
models.append(model_keys[self.model3.value[0] - 1])
interp = "add_difference"
else:
interp = self.interpolations[self.merge_method.value[0]]
args = {
"model_keys": models,
"alpha": self.alpha.value,
"interp": interp,
"force": self.force.value,
"merged_model_name": self.merged_model_name.value,
}
return args
def check_for_overwrite(self) -> bool:
model_out = self.merged_model_name.value
if model_out not in self.model_names:
return True
else:
result: bool = npyscreen.notify_yes_no(
f"The chosen merged model destination, {model_out}, is already in use. Overwrite?"
)
return result
def validate_field_values(self) -> bool:
bad_fields = []
model_names = self.model_names
selected_models = {model_names[self.model1.value[0]], model_names[self.model2.value[0]]}
if self.model3.value[0] > 0:
selected_models.add(model_names[self.model3.value[0] - 1])
if len(selected_models) < 2:
bad_fields.append(f"Please select two or three DIFFERENT models to compare. You selected {selected_models}")
if len(bad_fields) > 0:
message = "The following problems were detected and must be corrected:"
for problem in bad_fields:
message += f"\n* {problem}"
npyscreen.notify_confirm(message)
return False
else:
return True
def get_models(self, base_model: Optional[BaseModelType] = None) -> List[Tuple[str, str]]: # key to name
models = [
(x.key, x.name)
for x in self.model_record_store.search_by_attr(model_type=ModelType.Main, base_model=base_model)
if x.format == ModelFormat("diffusers")
and hasattr(x, "variant")
and x.variant == ModelVariantType("normal")
]
return sorted(models, key=lambda x: x[1])
def _populate_models(self, value: List[int]) -> None:
base_model = BASE_TYPES[value[0]][0]
self.models = self.get_models(base_model)
self.model_names = [x[1] for x in self.models]
models_plus_none = self.model_names.copy()
models_plus_none.insert(0, "None")
self.model1.values = self.model_names
self.model2.values = self.model_names
self.model3.values = models_plus_none
self.display()
# npyscreen is untyped and causes mypy to get naggy
class Mergeapp(npyscreen.NPSAppManaged): # type: ignore
def __init__(self, installer: ModelInstallServiceBase):
"""Initialize the npyscreen application."""
super().__init__()
self.installer = installer
def onStart(self) -> None:
npyscreen.setTheme(npyscreen.Themes.ElegantTheme)
self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings")
def run_gui(args: Namespace) -> None:
installer = initialize_installer(config)
mergeapp = Mergeapp(installer)
mergeapp.run()
merge_args = mergeapp.merge_arguments
merger = ModelMerger(installer)
merger.merge_diffusion_models_and_save(**merge_args)
logger.info(f'Models merged into new model: "{merge_args.merged_model_name}".')
def run_cli(args: Namespace) -> None:
assert args.alpha >= 0 and args.alpha <= 1.0, "alpha must be between 0 and 1"
assert (
args.model_names and 2 <= len(args.model_names) <= 3
), "Please provide the --models argument to list 2 to 3 models to merge. Use --help for full usage."
if not args.merged_model_name:
args.merged_model_name = "+".join(args.model_names)
logger.info(f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"')
installer = initialize_installer(config)
store = installer.record_store
assert (
len(store.search_by_attr(args.merged_model_name, args.base_model, ModelType.Main)) == 0 or args.clobber
), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
merger = ModelMerger(installer)
model_keys = []
for name in args.model_names:
if len(name) == 32 and re.match(r"^[0-9a-f]$", name):
model_keys.append(name)
else:
models = store.search_by_attr(
model_name=name, model_type=ModelType.Main, base_model=BaseModelType(args.base_model)
)
assert len(models) > 0, f"{name}: Unknown model"
assert len(models) < 2, f"{name}: More than one model by this name. Please specify the model key instead."
model_keys.append(models[0].key)
merger.merge_diffusion_models_and_save(
alpha=args.alpha,
model_keys=model_keys,
merged_model_name=args.merged_model_name,
interp=args.interp,
force=args.force,
)
logger.info(f'Models merged into new model: "{args.merged_model_name}".')
def main() -> None:
args = _parse_args()
if args.root_dir:
config.parse_args(["--root", str(args.root_dir)])
else:
config.parse_args([])
try:
if args.front_end:
run_gui(args)
else:
run_cli(args)
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
logger.error("You need to have at least two diffusers models defined in models.yaml in order to merge")
else:
logger.error("Not enough room for the user interface. Try making this window larger.")
sys.exit(-1)
except Exception as e:
logger.error(str(e))
sys.exit(-1)
except KeyboardInterrupt:
sys.exit(-1)
if __name__ == "__main__":
main()

View File

@@ -3,7 +3,7 @@
"""
This is the frontend to "textual_inversion_training.py".
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
"""
@@ -14,7 +14,7 @@ import sys
import traceback
from argparse import Namespace
from pathlib import Path
from typing import List, Tuple
from typing import Dict, List, Optional, Tuple
import npyscreen
from npyscreen import widget
@@ -22,8 +22,9 @@ from omegaconf import OmegaConf
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from ...backend.training import do_textual_inversion_training, parse_args
from invokeai.backend.install.install_helper import initialize_installer
from invokeai.backend.model_manager import ModelType
from invokeai.backend.training import do_textual_inversion_training, parse_args
TRAINING_DATA = "text-inversion-training-data"
TRAINING_DIR = "text-inversion-output"
@@ -44,19 +45,21 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
precisions = ["no", "fp16", "bf16"]
learnable_properties = ["object", "style"]
def __init__(self, parentApp, name, saved_args=None):
def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None):
self.saved_args = saved_args or {}
super().__init__(parentApp, name)
def afterEditing(self):
def afterEditing(self) -> None:
self.parentApp.setNextForm(None)
def create(self):
def create(self) -> None:
self.model_names, default = self.get_model_names()
default_initializer_token = ""
default_placeholder_token = ""
saved_args = self.saved_args
assert config is not None
try:
default = self.model_names.index(saved_args["model"])
except Exception:
@@ -71,7 +74,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
self.model = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Model Name:",
values=self.model_names,
values=sorted(self.model_names),
value=default,
max_height=len(self.model_names) + 1,
scroll_exit=True,
@@ -236,7 +239,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
)
self.model.editing = True
def initializer_changed(self):
def initializer_changed(self) -> None:
placeholder = self.placeholder_token.value
self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
@@ -275,10 +278,13 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
return True
def get_model_names(self) -> Tuple[List[str], int]:
conf = OmegaConf.load(config.root_dir / "configs/models.yaml")
model_names = [idx for idx in sorted(conf.keys()) if conf[idx].get("format", None) == "diffusers"]
defaults = [idx for idx in range(len(model_names)) if "default" in conf[model_names[idx]]]
default = defaults[0] if len(defaults) > 0 else 0
global config
assert config is not None
installer = initialize_installer(config)
store = installer.record_store
main_models = store.search_by_attr(model_type=ModelType.Main)
model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"]
default = 0
return (model_names, default)
def marshall_arguments(self) -> dict:
@@ -326,7 +332,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
class MyApplication(npyscreen.NPSAppManaged):
def __init__(self, saved_args=None):
def __init__(self, saved_args: Optional[Dict[str, str]] = None):
super().__init__()
self.ti_arguments = None
self.saved_args = saved_args
@@ -341,11 +347,12 @@ class MyApplication(npyscreen.NPSAppManaged):
)
def copy_to_embeddings_folder(args: dict):
def copy_to_embeddings_folder(args: Dict[str, str]) -> None:
"""
Copy learned_embeds.bin into the embeddings folder, and offer to
delete the full model and checkpoints.
"""
assert config is not None
source = Path(args["output_dir"], "learned_embeds.bin")
dest_dir_name = args["placeholder_token"].strip("<>")
destination = config.root_dir / "embeddings" / dest_dir_name
@@ -358,10 +365,11 @@ def copy_to_embeddings_folder(args: dict):
logger.info(f'Keeping {args["output_dir"]}')
def save_args(args: dict):
def save_args(args: dict) -> None:
"""
Save the current argument values to an omegaconf file
"""
assert config is not None
dest_dir = config.root_dir / TRAINING_DIR
os.makedirs(dest_dir, exist_ok=True)
conf_file = dest_dir / CONF_FILE
@@ -373,6 +381,7 @@ def previous_args() -> dict:
"""
Get the previous arguments used.
"""
assert config is not None
conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
try:
conf = OmegaConf.load(conf_file)
@@ -383,24 +392,26 @@ def previous_args() -> dict:
return conf
def do_front_end(args: Namespace):
def do_front_end() -> None:
global config
saved_args = previous_args()
myapplication = MyApplication(saved_args=saved_args)
myapplication.run()
if args := myapplication.ti_arguments:
os.makedirs(args["output_dir"], exist_ok=True)
if my_args := myapplication.ti_arguments:
os.makedirs(my_args["output_dir"], exist_ok=True)
# Automatically add angle brackets around the trigger
if not re.match("^<.+>$", args["placeholder_token"]):
args["placeholder_token"] = f"<{args['placeholder_token']}>"
if not re.match("^<.+>$", my_args["placeholder_token"]):
my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>"
args["only_save_embeds"] = True
save_args(args)
my_args["only_save_embeds"] = True
save_args(my_args)
try:
do_textual_inversion_training(InvokeAIAppConfig.get_config(), **args)
copy_to_embeddings_folder(args)
print(my_args)
do_textual_inversion_training(config, **my_args)
copy_to_embeddings_folder(my_args)
except Exception as e:
logger.error("An exception occurred during training. The exception was:")
logger.error(str(e))
@@ -408,11 +419,12 @@ def do_front_end(args: Namespace):
logger.error(traceback.format_exc())
def main():
def main() -> None:
global config
args = parse_args()
args: Namespace = parse_args()
config = InvokeAIAppConfig.get_config()
config.parse_args([])
# change root if needed
if args.root_dir:
@@ -420,7 +432,7 @@ def main():
try:
if args.front_end:
do_front_end(args)
do_front_end()
else:
do_textual_inversion_training(config, **vars(args))
except AssertionError as e:

View File

@@ -0,0 +1,454 @@
#!/usr/bin/env python
"""
This is the frontend to "textual_inversion_training.py".
Copyright (c) 2023-24 Lincoln Stein and the InvokeAI Development Team
"""
import os
import re
import shutil
import sys
import traceback
from argparse import Namespace
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import npyscreen
from npyscreen import widget
from omegaconf import OmegaConf
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.install_helper import initialize_installer
from invokeai.backend.model_manager import ModelType
from invokeai.backend.training import do_textual_inversion_training, parse_args
TRAINING_DATA = "text-inversion-training-data"
TRAINING_DIR = "text-inversion-output"
CONF_FILE = "preferences.conf"
config = None
class textualInversionForm(npyscreen.FormMultiPageAction):
resolutions = [512, 768, 1024]
lr_schedulers = [
"linear",
"cosine",
"cosine_with_restarts",
"polynomial",
"constant",
"constant_with_warmup",
]
precisions = ["no", "fp16", "bf16"]
learnable_properties = ["object", "style"]
def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, saved_args: Optional[Dict[str, str]] = None):
self.saved_args = saved_args or {}
super().__init__(parentApp, name)
def afterEditing(self) -> None:
self.parentApp.setNextForm(None)
def create(self) -> None:
self.model_names, default = self.get_model_names()
default_initializer_token = ""
default_placeholder_token = ""
saved_args = self.saved_args
assert config is not None
try:
default = self.model_names.index(saved_args["model"])
except Exception:
pass
self.add_widget_intelligent(
npyscreen.FixedText,
value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields, cursor arrows to make a selection, and space to toggle checkboxes.",
editable=False,
)
self.model = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Model Name:",
values=sorted(self.model_names),
value=default,
max_height=len(self.model_names) + 1,
scroll_exit=True,
)
self.placeholder_token = self.add_widget_intelligent(
npyscreen.TitleText,
name="Trigger Term:",
value="", # saved_args.get('placeholder_token',''), # to restore previous term
scroll_exit=True,
)
self.placeholder_token.when_value_edited = self.initializer_changed
self.nextrely -= 1
self.nextrelx += 30
self.prompt_token = self.add_widget_intelligent(
npyscreen.FixedText,
name="Trigger term for use in prompt",
value="",
editable=False,
scroll_exit=True,
)
self.nextrelx -= 30
self.initializer_token = self.add_widget_intelligent(
npyscreen.TitleText,
name="Initializer:",
value=saved_args.get("initializer_token", default_initializer_token),
scroll_exit=True,
)
self.resume_from_checkpoint = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Resume from last saved checkpoint",
value=False,
scroll_exit=True,
)
self.learnable_property = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Learnable property:",
values=self.learnable_properties,
value=self.learnable_properties.index(saved_args.get("learnable_property", "object")),
max_height=4,
scroll_exit=True,
)
self.train_data_dir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name="Data Training Directory:",
select_dir=True,
must_exist=False,
value=str(
saved_args.get(
"train_data_dir",
config.root_dir / TRAINING_DATA / default_placeholder_token,
)
),
scroll_exit=True,
)
self.output_dir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name="Output Destination Directory:",
select_dir=True,
must_exist=False,
value=str(
saved_args.get(
"output_dir",
config.root_dir / TRAINING_DIR / default_placeholder_token,
)
),
scroll_exit=True,
)
self.resolution = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Image resolution (pixels):",
values=self.resolutions,
value=self.resolutions.index(saved_args.get("resolution", 512)),
max_height=4,
scroll_exit=True,
)
self.center_crop = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Center crop images before resizing to resolution",
value=saved_args.get("center_crop", False),
scroll_exit=True,
)
self.mixed_precision = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Mixed Precision:",
values=self.precisions,
value=self.precisions.index(saved_args.get("mixed_precision", "fp16")),
max_height=4,
scroll_exit=True,
)
self.num_train_epochs = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Number of training epochs:",
out_of=1000,
step=50,
lowest=1,
value=saved_args.get("num_train_epochs", 100),
scroll_exit=True,
)
self.max_train_steps = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Max Training Steps:",
out_of=10000,
step=500,
lowest=1,
value=saved_args.get("max_train_steps", 3000),
scroll_exit=True,
)
self.train_batch_size = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Batch Size (reduce if you run out of memory):",
out_of=50,
step=1,
lowest=1,
value=saved_args.get("train_batch_size", 8),
scroll_exit=True,
)
self.gradient_accumulation_steps = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Gradient Accumulation Steps (may need to decrease this to resume from a checkpoint):",
out_of=10,
step=1,
lowest=1,
value=saved_args.get("gradient_accumulation_steps", 4),
scroll_exit=True,
)
self.lr_warmup_steps = self.add_widget_intelligent(
npyscreen.TitleSlider,
name="Warmup Steps:",
out_of=100,
step=1,
lowest=0,
value=saved_args.get("lr_warmup_steps", 0),
scroll_exit=True,
)
self.learning_rate = self.add_widget_intelligent(
npyscreen.TitleText,
name="Learning Rate:",
value=str(
saved_args.get("learning_rate", "5.0e-04"),
),
scroll_exit=True,
)
self.scale_lr = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Scale learning rate by number GPUs, steps and batch size",
value=saved_args.get("scale_lr", True),
scroll_exit=True,
)
self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Use xformers acceleration",
value=saved_args.get("enable_xformers_memory_efficient_attention", False),
scroll_exit=True,
)
self.lr_scheduler = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name="Learning rate scheduler:",
values=self.lr_schedulers,
max_height=7,
value=self.lr_schedulers.index(saved_args.get("lr_scheduler", "constant")),
scroll_exit=True,
)
self.model.editing = True
def initializer_changed(self) -> None:
placeholder = self.placeholder_token.value
self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
self.train_data_dir.value = str(config.root_dir / TRAINING_DATA / placeholder)
self.output_dir.value = str(config.root_dir / TRAINING_DIR / placeholder)
self.resume_from_checkpoint.value = Path(self.output_dir.value).exists()
def on_ok(self):
if self.validate_field_values():
self.parentApp.setNextForm(None)
self.editing = False
self.parentApp.ti_arguments = self.marshall_arguments()
npyscreen.notify("Launching textual inversion training. This will take a while...")
else:
self.editing = True
def on_cancel(self):
sys.exit(0)
def validate_field_values(self) -> bool:
bad_fields = []
if self.model.value is None:
bad_fields.append("Model Name must correspond to a known model in models.yaml")
if not re.match("^[a-zA-Z0-9.-]+$", self.placeholder_token.value):
bad_fields.append("Trigger term must only contain alphanumeric characters, the dot and hyphen")
if self.train_data_dir.value is None:
bad_fields.append("Data Training Directory cannot be empty")
if self.output_dir.value is None:
bad_fields.append("The Output Destination Directory cannot be empty")
if len(bad_fields) > 0:
message = "The following problems were detected and must be corrected:"
for problem in bad_fields:
message += f"\n* {problem}"
npyscreen.notify_confirm(message)
return False
else:
return True
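
The trigger-term check above admits only alphanumerics, dots and hyphens; a quick illustration of the pattern:

    import re

    TRIGGER_RE = "^[a-zA-Z0-9.-]+$"  # same pattern as the validation above

    assert re.match(TRIGGER_RE, "my-style.v2")
    assert not re.match(TRIGGER_RE, "my style")  # whitespace is rejected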
def get_model_names(self) -> Tuple[List[str], int]:
global config
assert config is not None
installer = initialize_installer(config)
store = installer.record_store
main_models = store.search_by_attr(model_type=ModelType.Main)
model_names = [f"{x.base.value}/{x.type.value}/{x.name}" for x in main_models if x.format == "diffusers"]
default = 0
return (model_names, default)
def marshall_arguments(self) -> dict:
args = {}
# the choices
args.update(
model=self.model_names[self.model.value[0]],
resolution=self.resolutions[self.resolution.value[0]],
lr_scheduler=self.lr_schedulers[self.lr_scheduler.value[0]],
mixed_precision=self.precisions[self.mixed_precision.value[0]],
learnable_property=self.learnable_properties[self.learnable_property.value[0]],
)
# all the strings and booleans
for attr in (
"initializer_token",
"placeholder_token",
"train_data_dir",
"output_dir",
"scale_lr",
"center_crop",
"enable_xformers_memory_efficient_attention",
):
args[attr] = getattr(self, attr).value
# all the integers
for attr in (
"train_batch_size",
"gradient_accumulation_steps",
"num_train_epochs",
"max_train_steps",
"lr_warmup_steps",
):
args[attr] = int(getattr(self, attr).value)
# the floats (just one)
args.update(learning_rate=float(self.learning_rate.value))
# a special case
if self.resume_from_checkpoint.value and Path(self.output_dir.value).exists():
args["resume_from_checkpoint"] = "latest"
return args
class MyApplication(npyscreen.NPSAppManaged):
def __init__(self, saved_args: Optional[Dict[str, str]] = None):
super().__init__()
self.ti_arguments = None
self.saved_args = saved_args
def onStart(self):
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
self.main = self.addForm(
"MAIN",
textualInversionForm,
name="Textual Inversion Settings",
saved_args=self.saved_args,
)
def copy_to_embeddings_folder(args: Dict[str, str]) -> None:
"""
Copy learned_embeds.bin into the embeddings folder, and offer to
delete the full model and checkpoints.
"""
assert config is not None
source = Path(args["output_dir"], "learned_embeds.bin")
dest_dir_name = args["placeholder_token"].strip("<>")
destination = config.root_dir / "embeddings" / dest_dir_name
os.makedirs(destination, exist_ok=True)
logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
shutil.copy(source, destination)
if (input("Delete training logs and intermediate checkpoints? [y] ") or "y").startswith(("y", "Y")):
shutil.rmtree(Path(args["output_dir"]))
else:
logger.info(f'Keeping {args["output_dir"]}')
def save_args(args: dict) -> None:
"""
Save the current argument values to an omegaconf file
"""
assert config is not None
dest_dir = config.root_dir / TRAINING_DIR
os.makedirs(dest_dir, exist_ok=True)
conf_file = dest_dir / CONF_FILE
conf = OmegaConf.create(args)
OmegaConf.save(config=conf, f=conf_file)
def previous_args() -> dict:
"""
Get the previous arguments used.
"""
assert config is not None
conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
try:
conf = OmegaConf.load(conf_file)
conf["placeholder_token"] = conf["placeholder_token"].strip("<>")
except Exception:
conf = None
return conf
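
save_args() and previous_args() form a small OmegaConf round trip; a self-contained sketch (the path below is hypothetical):

    from omegaconf import OmegaConf

    conf = OmegaConf.create({"placeholder_token": "<my-token>", "resolution": 512})
    OmegaConf.save(config=conf, f="/tmp/preferences.conf")  # hypothetical path
    loaded = OmegaConf.load("/tmp/preferences.conf")
    assert loaded["placeholder_token"] == "<my-token>"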
def do_front_end() -> None:
global config
saved_args = previous_args()
myapplication = MyApplication(saved_args=saved_args)
myapplication.run()
if my_args := myapplication.ti_arguments:
os.makedirs(my_args["output_dir"], exist_ok=True)
# Automatically add angle brackets around the trigger
if not re.match("^<.+>$", my_args["placeholder_token"]):
my_args["placeholder_token"] = f"<{my_args['placeholder_token']}>"
my_args["only_save_embeds"] = True
save_args(my_args)
try:
print(my_args)
do_textual_inversion_training(config, **my_args)
copy_to_embeddings_folder(my_args)
except Exception as e:
logger.error("An exception occurred during training. The exception was:")
logger.error(str(e))
logger.error("DETAILS:")
logger.error(traceback.format_exc())
def main() -> None:
global config
args: Namespace = parse_args()
config = InvokeAIAppConfig.get_config()
config.parse_args([])
# change root if needed
if args.root_dir:
config.root = args.root_dir
try:
if args.front_end:
do_front_end()
else:
do_textual_inversion_training(config, **vars(args))
except AssertionError as e:
logger.error(e)
sys.exit(-1)
except KeyboardInterrupt:
pass
except Exception as e:  # includes widget.NotEnoughSpaceForWidget
if str(e).startswith("Height of 1 allocated"):
logger.error("You need to have at least one diffusers models defined in models.yaml in order to train")
elif str(e).startswith("addwstr"):
logger.error("Not enough window space for the interface. Please make your window larger and try again.")
else:
logger.error(e)
sys.exit(-1)
if __name__ == "__main__":
main()

View File

@@ -1,131 +1,26 @@
module.exports = {
env: {
browser: true,
es6: true,
node: true,
},
extends: [
'eslint:recommended',
'plugin:@typescript-eslint/recommended',
'plugin:react/recommended',
'plugin:react-hooks/recommended',
'plugin:react/jsx-runtime',
'prettier',
'plugin:storybook/recommended',
],
parser: '@typescript-eslint/parser',
parserOptions: {
ecmaFeatures: {
jsx: true,
},
ecmaVersion: 2018,
sourceType: 'module',
},
plugins: [
'react',
'@typescript-eslint',
'eslint-plugin-react-hooks',
'i18next',
'path',
'unused-imports',
'simple-import-sort',
'eslint-plugin-import',
// These rules are too strict for normal usage, but are useful for optimizing rerenders
// '@arthurgeron/react-usememo',
],
root: true,
extends: ['@invoke-ai/eslint-config-react'],
plugins: ['path', 'i18next'],
rules: {
// TODO(psyche): Enable this rule. Requires no default exports in components - many changes.
'react-refresh/only-export-components': 'off',
// TODO(psyche): Enable this rule. Requires a lot of eslint-disable-next-line comments.
'@typescript-eslint/consistent-type-assertions': 'off',
// https://github.com/qdanik/eslint-plugin-path
'path/no-relative-imports': ['error', { maxDepth: 0 }],
curly: 'error',
'i18next/no-literal-string': 'warn',
'react/jsx-no-bind': ['error', { allowBind: true }],
'react/jsx-curly-brace-presence': [
'error',
{ props: 'never', children: 'never' },
],
'react-hooks/exhaustive-deps': 'error',
'no-var': 'error',
'brace-style': 'error',
'prefer-template': 'error',
'import/no-duplicates': 'error',
radix: 'error',
'space-before-blocks': 'error',
'import/prefer-default-export': 'off',
'@typescript-eslint/no-unused-vars': 'off',
'unused-imports/no-unused-imports': 'error',
'unused-imports/no-unused-vars': [
'warn',
{
vars: 'all',
varsIgnorePattern: '^_',
args: 'after-used',
argsIgnorePattern: '^_',
},
],
// These rules are too strict for normal usage, but are useful for optimizing rerenders
// '@arthurgeron/react-usememo/require-usememo': [
// 'warn',
// {
// strict: false,
// checkHookReturnObject: false,
// fix: { addImports: true },
// checkHookCalls: false,
// },
// ],
// '@arthurgeron/react-usememo/require-memo': 'warn',
'@typescript-eslint/ban-ts-comment': 'warn',
'@typescript-eslint/no-explicit-any': 'warn',
'@typescript-eslint/no-empty-interface': [
'error',
{
allowSingleExtends: true,
},
],
'@typescript-eslint/consistent-type-imports': [
'error',
{
prefer: 'type-imports',
fixStyle: 'separate-type-imports',
disallowTypeAnnotations: true,
},
],
'@typescript-eslint/no-import-type-side-effects': 'error',
'simple-import-sort/imports': 'error',
'simple-import-sort/exports': 'error',
// Prefer @invoke-ai/ui components over chakra
'no-restricted-imports': 'off',
'@typescript-eslint/no-restricted-imports': [
'warn',
{
paths: [
{
name: '@chakra-ui/react',
message: "Please import from '@invoke-ai/ui' instead.",
},
{
name: '@chakra-ui/layout',
message: "Please import from '@invoke-ai/ui' instead.",
},
{
name: '@chakra-ui/portal',
message: "Please import from '@invoke-ai/ui' instead.",
},
],
},
],
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
'i18next/no-literal-string': 'error',
},
overrides: [
/**
* Overrides for stories
*/
{
files: ['*.stories.tsx'],
rules: {
// We may not have i18n available in stories.
'i18next/no-literal-string': 'off',
},
},
],
settings: {
react: {
version: 'detect',
},
},
};

View File

@ -1,9 +1,5 @@
module.exports = {
trailingComma: 'es5',
tabWidth: 2,
semi: true,
singleQuote: true,
endOfLine: 'auto',
...require('@invoke-ai/prettier-config-react'),
overrides: [
{
files: ['public/locales/*.json'],

View File

@ -1,7 +1,7 @@
import { PropsWithChildren, memo, useEffect } from 'react';
import { modelChanged } from '../src/features/parameters/store/generationSlice';
import { useAppDispatch } from '../src/app/store/storeHooks';
import { useGlobalModifiersInit } from '@invoke-ai/ui';
import { useGlobalModifiersInit } from '@invoke-ai/ui-library';
/**
* Initializes some state for storybook. Must be in a different component
* so that it is run inside the redux context.

View File

@ -6,7 +6,6 @@ import { Provider } from 'react-redux';
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
import { createStore } from '../src/app/store/store';
import { Container } from '@chakra-ui/react';
// TODO: Disabled for IDE performance issues with our translation JSON
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore

View File

@ -1,13 +1,7 @@
{
"entry": ["src/main.tsx"],
"extensions": [".ts", ".tsx"],
"ignorePatterns": [
"**/node_modules/**",
"dist/**",
"public/**",
"**/*.stories.tsx",
"config/**"
],
"ignorePatterns": ["**/node_modules/**", "dist/**", "public/**", "**/*.stories.tsx", "config/**"],
"ignoreUnresolved": [],
"ignoreUnimported": ["src/i18.d.ts", "vite.config.ts", "src/vite-env.d.ts"],
"respectGitignore": true,

View File

@ -0,0 +1,150 @@
# Invoke UI
<!-- @import "[TOC]" {cmd="toc" depthFrom=2 depthTo=3 orderedList=false} -->
<!-- code_chunk_output -->
- [Dev environment](#dev-environment)
- [Setup](#setup)
- [Package scripts](#package-scripts)
- [Type generation](#type-generation)
- [Localization](#localization)
- [VSCode](#vscode)
- [Contributing](#contributing)
- [Check in before investing your time](#check-in-before-investing-your-time)
- [Commit format](#commit-format)
- [Submitting a PR](#submitting-a-pr)
- [Other docs](#other-docs)
<!-- /code_chunk_output -->
Invoke's UI is made possible by many contributors and open-source libraries. Thank you!
## Dev environment
### Setup
1. Install [node] and [pnpm].
1. From `invokeai/frontend/web/`, run `pnpm i` to install all packages.
#### Run in dev mode
1. From `invokeai/frontend/web/`, run `pnpm dev`.
1. From repo root, run `python scripts/invokeai-web.py`.
1. Point your browser to the dev server address, e.g. <http://localhost:5173/>
### Package scripts
- `dev`: run the frontend in dev mode, enabling hot reloading
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
- `typegen`: generate types from the OpenAPI schema (see [Type generation])
- `lint:madge`: check frontend for circular dependencies
- `lint:eslint`: check frontend for code quality
- `lint:prettier`: check frontend for code formatting
- `lint:tsc`: check frontend for type issues
- `lint`: run all checks concurrently
- `fix`: run `eslint` and `prettier`, fixing fixable issues
### Type generation
We use [openapi-typescript] to generate types from the app's OpenAPI schema.
The generated types are committed to the repo in [schema.ts].
```sh
# from the repo root, start the server
python scripts/invokeai-web.py
# from invokeai/frontend/web/, run the script
pnpm typegen
```
### Localization
We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project.
Only the English source strings should be changed on this repo.
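For reference, a minimal sketch of how a locale key is consumed in a component via [i18next]'s React bindings (the component is illustrative):

```tsx
import { useTranslation } from 'react-i18next';

// Keys resolve against the locale JSON files, e.g. 'common.copy'
// is "Copy" in English and "Kopieren" in the German translation.
const CopyLabel = () => {
  const { t } = useTranslation();
  return <span>{t('common.copy')}</span>;
};

export default CopyLabel;
```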
### VSCode
#### Example debugger config
```jsonc
{
"version": "0.2.0",
"configurations": [
{
"type": "chrome",
"request": "launch",
"name": "Invoke UI",
"url": "http://localhost:5173",
"webRoot": "${workspaceFolder}/invokeai/frontend/web",
},
],
}
```
#### Remote dev
We've noticed an intermittent timeout issue with the VSCode remote dev port forwarding.
We suggest disabling the editor's port forwarding feature and doing it manually via SSH:
```sh
ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host
```
## Contributing Guidelines
Thanks for your interest in contributing to the Invoke Web UI!
Please follow these guidelines when contributing.
### Check in before investing your time
Please check in before you invest your time on anything besides a trivial fix, in case it conflicts with ongoing work or isn't aligned with the vision for the app.
If a feature request or issue doesn't already exist for the thing you want to work on, please create one.
Ping `@psychedelicious` on [discord] in the `#frontend-dev` channel or in the feature request / issue you want to work on - we're happy to chat.
### Code conventions
- This is a fairly complex app with a deep component tree. Please use memoization (`useCallback`, `useMemo`, `memo`) with enthusiasm.
- If you need to add some global, ephemeral state, please use [nanostores] if possible.
- Be careful with your redux selectors. If they need to be parameterized, consider creating them inside a `useMemo` (see the sketch after this list).
- Feel free to use `lodash` (via `lodash-es`) to make the intent of your code clear.
- Please add comments describing the "why", not the "how" (unless it is really arcane).
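A minimal sketch of the parameterized-selector pattern mentioned above. The `gallery.selection` state shape and the component are hypothetical, for illustration only:

```tsx
import { createSelector } from '@reduxjs/toolkit';
import { memo, useMemo } from 'react';
import { useSelector } from 'react-redux';

// Hypothetical state shape, for illustration only.
type RootState = { gallery: { selection: string[] } };

type Props = { imageName: string };

const SelectionBadge = memo(({ imageName }: Props) => {
  // Creating the parameterized selector inside useMemo gives each
  // component instance a stable, memoized selector for its own prop,
  // instead of sharing a single cache across all instances.
  const selectIsSelected = useMemo(
    () =>
      createSelector(
        (state: RootState) => state.gallery.selection,
        (selection) => selection.includes(imageName)
      ),
    [imageName]
  );
  const isSelected = useSelector(selectIsSelected);
  return <span>{isSelected ? 'Selected' : 'Not selected'}</span>;
});
SelectionBadge.displayName = 'SelectionBadge';
```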
### Commit format
Please use the [conventional commits] spec for the web UI, with a scope of "ui":
- `chore(ui): bump deps`
- `chore(ui): lint`
- `feat(ui): add some cool new feature`
- `fix(ui): fix some bug`
### Submitting a PR
- Ensure your branch is tidy. Use an interactive rebase to clean up the commit history and reword the commit messages if they are not descriptive.
- Run `pnpm lint`. Some issues are auto-fixable with `pnpm fix`.
- Fill out the PR form when creating the PR.
- It doesn't need to be super detailed, but a screenshot or video is nice if you changed something visually.
- If a section isn't relevant, delete it. There are no UI tests at this time.
## Other docs
- [Workflows - Design and Implementation]
- [State Management]
[node]: https://nodejs.org/en/download/
[pnpm]: https://github.com/pnpm/pnpm
[discord]: https://discord.gg/ZmtBAhwWhy
[i18next]: https://github.com/i18next/react-i18next
[Weblate]: https://hosted.weblate.org/engage/invokeai/
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
[Type generation]: #type-generation
[schema.ts]: ../src/services/api/schema.ts
[conventional commits]: https://www.conventionalcommits.org/en/v1.0.0/
[Workflows - Design and Implementation]: ./docs/WORKFLOWS_DESIGN_IMPLEMENTATION.md
[State Management]: ./docs/STATE_MGMT.md

View File

@ -22,12 +22,13 @@ export const packageConfig: UserConfig = {
fileName: (format) => `invoke-ai-ui.${format}.js`,
},
rollupOptions: {
external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react'],
external: ['react', 'react-dom', '@emotion/react', '@chakra-ui/react', '@invoke-ai/ui-library'],
output: {
globals: {
react: 'React',
'react-dom': 'ReactDOM',
'@emotion/react': 'EmotionReact',
'@invoke-ai/ui-library': 'UiLibrary',
},
},
},

View File

@ -1,154 +0,0 @@
# InvokeAI Web UI
<!-- @import "[TOC]" {cmd="toc" depthFrom=1 depthTo=6 orderedList=false} -->
<!-- code_chunk_output -->
- [InvokeAI Web UI](#invokeai-web-ui)
- [Core Libraries](#core-libraries)
- [Redux Toolkit](#redux-toolkit)
- [Socket\.IO](#socketio)
- [Chakra UI](#chakra-ui)
- [KonvaJS](#konvajs)
- [Vite](#vite)
- [i18next & Weblate](#i18next--weblate)
- [openapi-typescript](#openapi-typescript)
- [reactflow](#reactflow)
- [zod](#zod)
- [Client Types Generation](#client-types-generation)
- [Package Scripts](#package-scripts)
- [Contributing](#contributing)
- [Dev Environment](#dev-environment)
- [VSCode Remote Dev](#vscode-remote-dev)
- [Production builds](#production-builds)
<!-- /code_chunk_output -->
The UI is a fairly straightforward TypeScript React app.
## Core Libraries
InvokeAI's UI is made possible by a number of excellent open-source libraries. The most heavily-used are listed below, but there are many others.
### Redux Toolkit
[Redux Toolkit] is used for state management and fetching/caching:
- `RTK-Query` for data fetching and caching
- `createAsyncThunk` for a couple other HTTP requests
- `createEntityAdapter` to normalize things like images and models
- `createListenerMiddleware` for async workflows
We use [redux-remember] for persistence.
### Socket\.IO
[Socket.IO] is used for server-to-client events, like generation process and queue state changes.
### Chakra UI
[Chakra UI] is our primary UI library, but we also use a few components from [Mantine v6].
### KonvaJS
[KonvaJS] powers the canvas. In the future, we'd like to explore [PixiJS] or WebGPU.
### Vite
[Vite] is our bundler.
### i18next & Weblate
We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project. **Only the English source strings should be changed on this repo.**
### openapi-typescript
[openapi-typescript] is used to generate types from the server's OpenAPI schema. See TYPES_CODEGEN.md.
### reactflow
[reactflow] powers the Workflow Editor.
### zod
[zod] schemas are used to model data structures and provide runtime validation.
## Client Types Generation
We use [openapi-typescript] to generate types from the app's OpenAPI schema.
The generated types are written to `invokeai/frontend/web/src/services/api/schema.d.ts`. This file is committed to the repo.
The server must be started and available at <http://127.0.0.1:9090>.
```sh
# from the repo root, start the server
python scripts/invokeai-web.py
# from invokeai/frontend/web/, run the script
pnpm typegen
```
## Package Scripts
See `package.json` for all scripts.
Run with `pnpm <script name>`.
- `dev`: run the frontend in dev mode, enabling hot reloading
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
- `typegen`: generate types from the OpenAPI schema (see [Client Types Generation](#client-types-generation))
- `lint:madge`: check frontend for circular dependencies
- `lint:eslint`: check frontend for code quality
- `lint:prettier`: check frontend for code formatting
- `lint:tsc`: check frontend for type issues
- `lint`: run all checks concurrently
- `fix`: run `eslint` and `prettier`, fixing fixable issues
## Contributing
Thanks for your interest in contributing to the InvokeAI Web UI!
We encourage you to ping @psychedelicious and @blessedcoolant on [discord] if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
### Dev Environment
Install [node] and [pnpm].
From `invokeai/frontend/web/` run `pnpm i` to get everything set up.
Start everything in dev mode:
1. Start the dev server: `pnpm dev`
2. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
3. Point your browser to the dev server address e.g. <http://localhost:5173/>
#### VSCode Remote Dev
We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. We suggest disabling the IDE's port forwarding feature and doing it manually via SSH:
`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`
### Production builds
For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.
If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.
To build for production, run `pnpm build`.
[node]: https://nodejs.org/en/download/
[pnpm]: https://github.com/pnpm/pnpm
[discord]: https://discord.gg/ZmtBAhwWhy
[Redux Toolkit]: https://github.com/reduxjs/redux-toolkit
[redux-remember]: https://github.com/zewish/redux-remember
[Socket.IO]: https://github.com/socketio/socket.io
[Chakra UI]: https://github.com/chakra-ui/chakra-ui
[Mantine v6]: https://v6.mantine.dev/
[KonvaJS]: https://github.com/konvajs/react-konva
[PixiJS]: https://github.com/pixijs/pixijs
[Vite]: https://github.com/vitejs/vite
[i18next]: https://github.com/i18next/react-i18next
[Weblate]: https://hosted.weblate.org/engage/invokeai/
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
[reactflow]: https://github.com/xyflow/xyflow
[zod]: https://github.com/colinhacks/zod

View File

@ -0,0 +1,38 @@
# State Management
The app makes heavy use of Redux Toolkit, its Query library, and `nanostores`.
## Redux
TODO
## `nanostores`
[nanostores] is a tiny state management library. It provides both imperative and declarative APIs.
### Example
```ts
import { useStore } from '@nanostores/react';
import { atom } from 'nanostores';

export const $myStringOption = atom<string | null>(null);

// Outside a component, or within a callback for performance-critical logic
$myStringOption.get();
$myStringOption.set('new value');

// Inside a component
const myStringOption = useStore($myStringOption);
```
### Where to put nanostores
- For global application state, export your stores from `invokeai/frontend/web/src/app/store/nanostores/`.
- For feature state, create a file for the stores next to the redux slice definition (e.g. `invokeai/frontend/web/src/features/myFeature/myFeatureNanostores.ts`).
- For hooks with global state, export the store from the same file the hook is in, or put it next to the hook.
### When to use nanostores
- For non-serializable data that needs to be available throughout the app, use `nanostores` instead of a global.
- For ephemeral global state (i.e. state that does not need to be persisted), use `nanostores` instead of redux.
- For performance-critical code and in callbacks, redux selectors can be problematic due to the declarative reactivity system. Consider refactoring to use `nanostores` if there's a **measurable** performance issue (see the sketch below).
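As an example of that last point, a sketch of imperative reads and writes from a hot event handler, with no selectors or re-renders involved (the store and handler are hypothetical):

```ts
import { atom } from 'nanostores';

// Hypothetical store tracking the pointer position.
export const $cursorPos = atom<{ x: number; y: number }>({ x: 0, y: 0 });

// A pointermove handler fires very frequently; reading and writing the
// atom directly avoids running redux selectors on every event.
const onPointerMove = (e: PointerEvent) => {
  $cursorPos.set({ x: e.clientX, y: e.clientY });
};

window.addEventListener('pointermove', onPointerMove);
```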
[nanostores]: https://github.com/nanostores/nanostores/

View File

@ -23,7 +23,7 @@
- [Primitive Types](#primitive-types)
- [Complex Types](#complex-types)
- [Collection Types](#collection-types)
- [Polymorphic Types](#polymorphic-types)
- [Collection or Scalar Types](#collection-or-scalar-types)
- [Optional Fields](#optional-fields)
- [Building Field Input Templates](#building-field-input-templates)
- [Building Field Output Templates](#building-field-output-templates)

View File

@ -19,8 +19,8 @@
"dist"
],
"scripts": {
"dev": "concurrently \"vite dev\" \"pnpm run theme:watch\"",
"dev:host": "concurrently \"vite dev --host\" \"pnpm run theme:watch\"",
"dev": "vite dev",
"dev:host": "vite dev --host",
"build": "pnpm run lint && vite build",
"typegen": "node scripts/typegen.js",
"preview": "vite preview",
@ -31,9 +31,6 @@
"lint": "concurrently -g -n eslint,prettier,tsc,madge -c cyan,green,magenta,yellow \"pnpm run lint:eslint\" \"pnpm run lint:prettier\" \"pnpm run lint:tsc\" \"pnpm run lint:madge\"",
"fix": "eslint --fix . && prettier --log-level warn --write .",
"preinstall": "npx only-allow pnpm",
"postinstall": "pnpm run theme",
"theme": "chakra-cli tokens node_modules/@invoke-ai/ui",
"theme:watch": "chakra-cli tokens node_modules/@invoke-ai/ui --watch",
"storybook": "storybook dev -p 6006",
"build-storybook": "storybook build",
"unimported": "npx unimported"
@ -52,21 +49,12 @@
}
},
"dependencies": {
"@chakra-ui/anatomy": "^2.2.2",
"@chakra-ui/icons": "^2.1.1",
"@chakra-ui/layout": "^2.3.1",
"@chakra-ui/portal": "^2.1.0",
"@chakra-ui/react": "^2.8.2",
"@chakra-ui/react-use-size": "^2.1.0",
"@chakra-ui/styled-system": "^2.9.2",
"@chakra-ui/theme-tools": "^2.1.2",
"@dagrejs/graphlib": "^2.1.13",
"@dnd-kit/core": "^6.1.0",
"@dnd-kit/utilities": "^3.2.2",
"@emotion/react": "^11.11.3",
"@emotion/styled": "^11.11.0",
"@fontsource-variable/inter": "^5.0.16",
"@invoke-ai/ui": "0.0.10",
"@invoke-ai/ui-library": "^0.0.18",
"@mantine/form": "6.0.21",
"@nanostores/react": "^0.7.1",
"@reduxjs/toolkit": "2.0.1",
@ -116,7 +104,6 @@
"zod-validation-error": "^3.0.0"
},
"peerDependencies": {
"@chakra-ui/cli": "^2.4.1",
"@chakra-ui/react": "^2.8.2",
"react": "^18.2.0",
"react-dom": "^18.2.0",
@ -124,7 +111,8 @@
},
"devDependencies": {
"@arthurgeron/eslint-plugin-react-usememo": "^2.2.3",
"@chakra-ui/cli": "^2.4.1",
"@invoke-ai/eslint-config-react": "^0.0.13",
"@invoke-ai/prettier-config-react": "^0.0.6",
"@storybook/addon-docs": "^7.6.10",
"@storybook/addon-essentials": "^7.6.10",
"@storybook/addon-interactions": "^7.6.10",
@ -164,7 +152,7 @@
"storybook": "^7.6.10",
"ts-toolbelt": "^9.6.0",
"typescript": "^5.3.3",
"vite": "^5.0.11",
"vite": "^5.0.12",
"vite-plugin-css-injected-by-js": "^3.3.1",
"vite-plugin-dts": "^3.7.1",
"vite-plugin-eslint": "^1.8.1",

File diff suppressed because it is too large

View File

@ -98,7 +98,7 @@
"outputs": "Ausgabe",
"data": "Daten",
"safetensors": "Safetensors",
"outpaint": "outpaint",
"outpaint": "Ausmalen",
"details": "Details",
"format": "Format",
"unknown": "Unbekannt",
@ -110,7 +110,29 @@
"somethingWentWrong": "Etwas ist schief gelaufen",
"copyError": "$t(gallery.copy) Fehler",
"input": "Eingabe",
"notInstalled": "Nicht $t(common.installed)"
"notInstalled": "Nicht $t(common.installed)",
"advancedOptions": "Erweiterte Einstellungen",
"alpha": "Alpha",
"red": "Rot",
"green": "Grün",
"blue": "Blau",
"delete": "Löschen",
"or": "oder",
"direction": "Richtung",
"free": "Frei",
"save": "Speichern",
"preferencesLabel": "Präferenzen",
"created": "Erstellt",
"prevPage": "Vorherige Seite",
"nextPage": "Nächste Seite",
"unknownError": "Unbekannter Fehler",
"unsaved": "Nicht gespeichert",
"aboutDesc": "Verwenden Sie Invoke für die Arbeit? Dann siehe hier:",
"localSystem": "Lokales System",
"orderBy": "Ordnen nach",
"saveAs": "Speicher als",
"updated": "Aktualisiert",
"copy": "Kopieren"
},
"gallery": {
"generations": "Erzeugungen",
@ -140,7 +162,13 @@
"currentlyInUse": "Dieses Bild wird derzeit in den folgenden Funktionen verwendet:",
"deleteImagePermanent": "Gelöschte Bilder können nicht wiederhergestellt werden.",
"autoAssignBoardOnClick": "Board per Klick automatisch zuweisen",
"noImageSelected": "Kein Bild ausgewählt"
"noImageSelected": "Kein Bild ausgewählt",
"problemDeletingImagesDesc": "Eins oder mehr Bilder könnten nicht gelöscht werden",
"starImage": "Bild markieren",
"assets": "Ressourcen",
"unstarImage": "Markierung Entfernen",
"image": "Bild",
"deleteSelection": "Lösche markierte"
},
"hotkeys": {
"keyboardShortcuts": "Tastenkürzel",
@ -344,7 +372,13 @@
"addNodes": {
"title": "Knotenpunkt hinzufügen",
"desc": "Öffnet das Menü zum Hinzufügen von Knoten"
}
},
"cancelAndClear": {
"title": "Abbruch und leeren"
},
"noHotkeysFound": "Kein Hotkey gefunden",
"searchHotkeys": "Hotkeys durchsuchen",
"clearSearch": "Suche leeren"
},
"modelManager": {
"modelAdded": "Model hinzugefügt",
@ -701,7 +735,8 @@
"invokeProgressBar": "Invoke Fortschrittsanzeige",
"mode": "Modus",
"resetUI": "$t(accessibility.reset) von UI",
"createIssue": "Ticket erstellen"
"createIssue": "Ticket erstellen",
"about": "Über"
},
"boards": {
"autoAddBoard": "Automatisches Hinzufügen zum Ordner",
@ -809,7 +844,14 @@
"canny": "Canny",
"hedDescription": "Ganzheitlich verschachtelte Kantenerkennung",
"scribble": "Scribble",
"maxFaces": "Maximal Anzahl Gesichter"
"maxFaces": "Maximal Anzahl Gesichter",
"resizeSimple": "Größe ändern (einfach)",
"large": "Groß",
"modelSize": "Modell Größe",
"small": "Klein",
"base": "Basis",
"depthAnything": "Depth Anything",
"depthAnythingDescription": "Erstellung einer Tiefenkarte mit der Depth Anything-Technik"
},
"queue": {
"status": "Status",
@ -842,7 +884,7 @@
"item": "Auftrag",
"notReady": "Warteschlange noch nicht bereit",
"batchValues": "Stapel Werte",
"queueCountPrediction": "{{predicted}} zur Warteschlange hinzufügen",
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
"queuedCount": "{{pending}} wartenden Elemente",
"clearQueueAlertDialog": "Die Warteschlange leeren, stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
"completedIn": "Fertig in",
@ -864,7 +906,9 @@
"back": "Hinten",
"resumeSucceeded": "Prozessor wieder aufgenommen",
"resumeTooltip": "Prozessor wieder aufnehmen",
"time": "Zeit"
"time": "Zeit",
"batchQueuedDesc_one": "{{count}} Eintrage ans {{direction}} der Wartschlange hinzugefügt",
"batchQueuedDesc_other": "{{count}} Einträge ans {{direction}} der Wartschlange hinzugefügt"
},
"metadata": {
"negativePrompt": "Negativ Beschreibung",
@ -933,7 +977,8 @@
"enable": "Aktivieren",
"clear": "Leeren",
"maxCacheSize": "Maximale Cache Größe",
"cacheSize": "Cache Größe"
"cacheSize": "Cache Größe",
"useCache": "Benutze Cache"
},
"embedding": {
"noMatchingEmbedding": "Keine passenden Embeddings",
@ -999,5 +1044,28 @@
"selectLoRA": "Wählen ein LoRA aus",
"esrganModel": "ESRGAN Modell",
"addLora": "LoRA hinzufügen"
},
"accordions": {
"generation": {
"title": "Erstellung",
"modelTab": "Modell",
"conceptsTab": "Konzepte"
},
"image": {
"title": "Bild"
},
"advanced": {
"title": "Erweitert"
},
"control": {
"title": "Kontrolle",
"controlAdaptersTab": "Kontroll Adapter",
"ipTab": "Bild Beschreibung"
},
"compositing": {
"coherenceTab": "Kohärenzpass",
"infillTab": "Füllung",
"title": "Compositing"
}
}
}

View File

@ -86,6 +86,7 @@
"back": "Back",
"batch": "Batch Manager",
"cancel": "Cancel",
"copy": "Copy",
"copyError": "$t(gallery.copy) Error",
"close": "Close",
"on": "On",
@ -224,6 +225,7 @@
"amult": "a_mult",
"autoConfigure": "Auto configure processor",
"balanced": "Balanced",
"base": "Base",
"beginEndStepPercent": "Begin / End Step Percentage",
"bgth": "bg_th",
"canny": "Canny",
@ -237,6 +239,8 @@
"controlMode": "Control Mode",
"crop": "Crop",
"delete": "Delete",
"depthAnything": "Depth Anything",
"depthAnythingDescription": "Depth map generation using the Depth Anything technique",
"depthMidas": "Depth (Midas)",
"depthMidasDescription": "Depth map generation using Midas",
"depthZoe": "Depth (Zoe)",
@ -256,6 +260,7 @@
"colorMapTileSize": "Tile Size",
"importImageFromCanvas": "Import Image From Canvas",
"importMaskFromCanvas": "Import Mask From Canvas",
"large": "Large",
"lineart": "Lineart",
"lineartAnime": "Lineart Anime",
"lineartAnimeDescription": "Anime-style lineart processing",
@ -268,6 +273,7 @@
"minConfidence": "Min Confidence",
"mlsd": "M-LSD",
"mlsdDescription": "Minimalist Line Segment Detector",
"modelSize": "Model Size",
"none": "None",
"noneDescription": "No processing applied",
"normalBae": "Normal BAE",
@ -288,6 +294,7 @@
"selectModel": "Select a model",
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
"showAdvanced": "Show Advanced",
"small": "Small",
"toggleControlNet": "Toggle this ControlNet",
"w": "W",
"weight": "Weight",
@ -600,6 +607,10 @@
"desc": "Send current image to Image to Image",
"title": "Send To Image To Image"
},
"remixImage": {
"desc": "Use all parameters except seed from the current image",
"title": "Remix image"
},
"setParameters": {
"desc": "Use all parameters of the current image",
"title": "Set Parameters"
@ -1003,6 +1014,9 @@
"newWorkflow": "New Workflow",
"newWorkflowDesc": "Create a new workflow?",
"newWorkflowDesc2": "Your current workflow has unsaved changes.",
"clearWorkflow": "Clear Workflow",
"clearWorkflowDesc": "Clear this workflow and start a new one?",
"clearWorkflowDesc2": "Your current workflow has unsaved changes.",
"scheduler": "Scheduler",
"schedulerDescription": "TODO",
"sDXLMainModelField": "SDXL Model",
@ -1216,6 +1230,7 @@
"useCpuNoise": "Use CPU Noise",
"cpuNoise": "CPU Noise",
"gpuNoise": "GPU Noise",
"remixImage": "Remix Image",
"useInitImg": "Use Initial Image",
"usePrompt": "Use Prompt",
"useSeed": "Use Seed",
@ -1361,6 +1376,7 @@
"problemCopyingCanvasDesc": "Unable to export base layer",
"problemCopyingImage": "Unable to Copy Image",
"problemCopyingImageLink": "Unable to Copy Image Link",
"problemDownloadingImage": "Unable to Download Image",
"problemDownloadingCanvas": "Problem Downloading Canvas",
"problemDownloadingCanvasDesc": "Unable to export base layer",
"problemImportingMask": "Problem Importing Mask",
@ -1452,9 +1468,7 @@
},
"compositingCoherencePass": {
"heading": "Coherence Pass",
"paragraphs": [
"A second round of denoising helps to composite the Inpainted/Outpainted image."
]
"paragraphs": ["A second round of denoising helps to composite the Inpainted/Outpainted image."]
},
"compositingCoherenceMode": {
"heading": "Mode",
@ -1462,10 +1476,7 @@
},
"compositingCoherenceSteps": {
"heading": "Steps",
"paragraphs": [
"Number of denoising steps used in the Coherence Pass.",
"Same as the main Steps parameter."
]
"paragraphs": ["Number of denoising steps used in the Coherence Pass.", "Same as the main Steps parameter."]
},
"compositingStrength": {
"heading": "Strength",
@ -1487,15 +1498,11 @@
},
"controlNetControlMode": {
"heading": "Control Mode",
"paragraphs": [
"Lends more weight to either the prompt or ControlNet."
]
"paragraphs": ["Lends more weight to either the prompt or ControlNet."]
},
"controlNetResizeMode": {
"heading": "Resize Mode",
"paragraphs": [
"How the ControlNet image will be fit to the image output size."
]
"paragraphs": ["How the ControlNet image will be fit to the image output size."]
},
"controlNet": {
"heading": "ControlNet",
@ -1505,9 +1512,7 @@
},
"controlNetWeight": {
"heading": "Weight",
"paragraphs": [
"How strongly the ControlNet will impact the generated image."
]
"paragraphs": ["How strongly the ControlNet will impact the generated image."]
},
"dynamicPrompts": {
"heading": "Dynamic Prompts",
@ -1519,9 +1524,7 @@
},
"dynamicPromptsMaxPrompts": {
"heading": "Max Prompts",
"paragraphs": [
"Limits the number of prompts that can be generated by Dynamic Prompts."
]
"paragraphs": ["Limits the number of prompts that can be generated by Dynamic Prompts."]
},
"dynamicPromptsSeedBehaviour": {
"heading": "Seed Behaviour",
@ -1538,9 +1541,7 @@
},
"lora": {
"heading": "LoRA Weight",
"paragraphs": [
"Higher LoRA weight will lead to larger impacts on the final image."
]
"paragraphs": ["Higher LoRA weight will lead to larger impacts on the final image."]
},
"noiseUseCPU": {
"heading": "Use CPU Noise",
@ -1552,9 +1553,7 @@
},
"paramCFGScale": {
"heading": "CFG Scale",
"paragraphs": [
"Controls how much your prompt influences the generation process."
]
"paragraphs": ["Controls how much your prompt influences the generation process."]
},
"paramCFGRescaleMultiplier": {
"heading": "CFG Rescale Multiplier",
@ -1606,9 +1605,7 @@
},
"paramVAE": {
"heading": "VAE",
"paragraphs": [
"Model used for translating AI output into the final image."
]
"paragraphs": ["Model used for translating AI output into the final image."]
},
"paramVAEPrecision": {
"heading": "VAE Precision",
@ -1697,6 +1694,7 @@
"workflowLibrary": "Library",
"userWorkflows": "My Workflows",
"defaultWorkflows": "Default Workflows",
"projectWorkflows": "Project Workflows",
"openWorkflow": "Open Workflow",
"uploadWorkflow": "Load from File",
"deleteWorkflow": "Delete Workflow",
@ -1704,11 +1702,13 @@
"downloadWorkflow": "Save to File",
"saveWorkflow": "Save Workflow",
"saveWorkflowAs": "Save Workflow As",
"saveWorkflowToProject": "Save Workflow to Project",
"savingWorkflow": "Saving Workflow...",
"problemSavingWorkflow": "Problem Saving Workflow",
"workflowSaved": "Workflow Saved",
"noRecentWorkflows": "No Recent Workflows",
"noUserWorkflows": "No User Workflows",
"noWorkflows": "No Workflows",
"noSystemWorkflows": "No System Workflows",
"problemLoading": "Problem Loading Workflows",
"loading": "Loading Workflows",
@ -1717,6 +1717,7 @@
"clearWorkflowSearchFilter": "Clear Workflow Search Filter",
"workflowName": "Workflow Name",
"newWorkflowCreated": "New Workflow Created",
"workflowCleared": "Workflow Cleared",
"workflowEditorMenu": "Workflow Editor Menu",
"workflowIsOpen": "Workflow is Open"
},

View File

@ -118,7 +118,15 @@
"advancedOptions": "Opzioni avanzate",
"free": "Libero",
"or": "o",
"preferencesLabel": "Preferenze"
"preferencesLabel": "Preferenze",
"red": "Rosso",
"aboutHeading": "Possiedi il tuo potere creativo",
"aboutDesc": "Utilizzi Invoke per lavoro? Guarda qui:",
"localSystem": "Sistema locale",
"green": "Verde",
"blue": "Blu",
"alpha": "Alfa",
"copy": "Copia"
},
"gallery": {
"generations": "Generazioni",
@ -377,7 +385,11 @@
"desc": "Apre e chiude le opzioni e i pannelli della galleria",
"title": "Attiva/disattiva le Opzioni e la Galleria"
},
"clearSearch": "Cancella ricerca"
"clearSearch": "Cancella ricerca",
"remixImage": {
"desc": "Utilizza tutti i parametri tranne il seme dell'immagine corrente",
"title": "Remixa l'immagine"
}
},
"modelManager": {
"modelManager": "Gestione Modelli",
@ -521,7 +533,8 @@
"customConfigFileLocation": "Posizione del file di configurazione personalizzato",
"vaePrecision": "Precisione VAE",
"noModelSelected": "Nessun modello selezionato",
"conversionNotSupported": "Conversione non supportata"
"conversionNotSupported": "Conversione non supportata",
"configFile": "File di configurazione"
},
"parameters": {
"images": "Immagini",
@ -660,7 +673,10 @@
"lockAspectRatio": "Blocca proporzioni",
"swapDimensions": "Scambia dimensioni",
"aspect": "Aspetto",
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (potrebbe essere troppo grande)"
"setToOptimalSizeTooLarge": "$t(parameters.setToOptimalSize) (potrebbe essere troppo grande)",
"boxBlur": "Box",
"gaussianBlur": "Gaussian",
"remixImage": "Remixa l'immagine"
},
"settings": {
"models": "Modelli",
@ -794,7 +810,9 @@
"invalidUpload": "Caricamento non valido",
"problemDeletingWorkflow": "Problema durante l'eliminazione del flusso di lavoro",
"workflowDeleted": "Flusso di lavoro eliminato",
"problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro"
"problemRetrievingWorkflow": "Problema nel recupero del flusso di lavoro",
"resetInitialImage": "Reimposta l'immagine iniziale",
"uploadInitialImage": "Carica l'immagine iniziale"
},
"tooltip": {
"feature": {
@ -899,7 +917,8 @@
"loadMore": "Carica altro",
"mode": "Modalità",
"resetUI": "$t(accessibility.reset) l'Interfaccia Utente",
"createIssue": "Segnala un problema"
"createIssue": "Segnala un problema",
"about": "Informazioni"
},
"ui": {
"hideProgressImages": "Nascondi avanzamento immagini",
@ -1232,7 +1251,11 @@
"scribble": "Scarabocchio",
"amult": "Angolo di illuminazione",
"coarse": "Approssimativo",
"resizeSimple": "Ridimensiona (semplice)"
"resizeSimple": "Ridimensiona (semplice)",
"large": "Grande",
"small": "Piccolo",
"depthAnythingDescription": "Generazione di mappe di profondità utilizzando la tecnica Depth Anything",
"modelSize": "Dimensioni del modello"
},
"queue": {
"queueFront": "Aggiungi all'inizio della coda",
@ -1664,7 +1687,9 @@
"userWorkflows": "I miei flussi di lavoro",
"newWorkflowCreated": "Nuovo flusso di lavoro creato",
"downloadWorkflow": "Salva su file",
"uploadWorkflow": "Carica da file"
"uploadWorkflow": "Carica da file",
"projectWorkflows": "Flussi di lavoro del progetto",
"noWorkflows": "Nessun flusso di lavoro"
},
"app": {
"storeNotInitialized": "Il negozio non è inizializzato"

View File

@ -1,30 +1,36 @@
{
"accessibility": {
"invokeProgressBar": "Invoke ilerleme durumu",
"nextImage": "Sonraki Resim",
"useThisParameter": "Kullanıcı parametreleri",
"copyMetadataJson": "Metadata verilerini kopyala (JSON)",
"exitViewer": "Görüntüleme Modundan Çık",
"invokeProgressBar": "Invoke durum çubuğu",
"nextImage": "Sonraki Görsel",
"useThisParameter": "Bu ayarları kullan",
"copyMetadataJson": "Üstveriyi kopyala (JSON)",
"exitViewer": "Görüntüleyiciden Çık",
"zoomIn": "Yakınlaştır",
"zoomOut": "Uzaklaştır",
"rotateCounterClockwise": "Döndür (Saat yönünün tersine)",
"rotateClockwise": "Döndür (Saat yönünde)",
"rotateCounterClockwise": "Saat yönünün tersine döndür",
"rotateClockwise": "Saat yönüne döndür",
"flipHorizontally": "Yatay Çevir",
"flipVertically": "Dikey Çevir",
"modifyConfig": "Ayarları Değiştir",
"toggleAutoscroll": "Otomatik kaydırmayı aç/kapat",
"toggleLogViewer": "Günlük Görüntüleyici Aç/Kapa",
"showOptionsPanel": "Ayarlar Panelini Göster",
"modelSelect": "Model Seçin",
"reset": "Sıfırla",
"uploadImage": "Resim Yükle",
"previousImage": "Önceki Resim",
"menu": "Menü"
"toggleAutoscroll": "Otomatik kaydırmayı Aç-Kapat",
"toggleLogViewer": "Günlüğü Aç-Kapat",
"showOptionsPanel": "Yan Paneli Göster",
"modelSelect": "Model Seçimi",
"reset": "Resetle",
"uploadImage": "Görsel Yükle",
"previousImage": "Önceki Görsel",
"menu": "Menü",
"about": "Hakkında",
"mode": "Kip",
"resetUI": "$t(accessibility.reset)Arayüz",
"showGalleryPanel": "Galeri Panelini Göster",
"loadMore": "Daha Getir",
"createIssue": "Sorun Bildir"
},
"common": {
"hotkeysLabel": "Kısayol Tuşları",
"languagePickerLabel": "Dil Seçimi",
"reportBugLabel": "Hata Bildir",
"languagePickerLabel": "Dil",
"reportBugLabel": "Sorun Bildir",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Ayarlar",
@ -37,22 +43,636 @@
"langJapanese": "Japonca",
"langPolish": "Lehçe",
"langPortuguese": "Portekizce",
"langBrPortuguese": "Portekizcr (Brezilya)",
"langBrPortuguese": "Portekizce (Brezilya)",
"langRussian": "Rusça",
"langSimplifiedChinese": "Çince (Basit)",
"langUkranian": "Ukraynaca",
"langSpanish": "İspanyolca",
"txt2img": "Metinden Resime",
"img2img": "Resimden Metine",
"linear": "Çizgisel",
"nodes": "Düğümler",
"postprocessing": "İşlem Sonrası",
"postProcessing": "İşlem Sonrası",
"postProcessDesc2": "Daha gelişmiş özellikler için ve iş akışını kolaylaştırmak için özel bir kullanıcı arayüzü çok yakında yayınlanacaktır.",
"postProcessDesc3": "Invoke AI komut satırı arayüzü, bir çok yeni özellik sunmaktadır.",
"txt2img": "Yazıdan Görsel",
"img2img": "Görselden Görsel",
"linear": "Doğrusal",
"nodes": "İş Akışı Düzenleyici",
"postprocessing": "Rötuş",
"postProcessing": "Rötuş",
"postProcessDesc2": "Daha gelişmiş iş akışlarına olanak sağlayacak özel bir arayüz yakında yayınlanacaktır.",
"postProcessDesc3": "Invoke AI Komut Satırı Arayüzü, içlerinde Embiggen da bulunan birçok özellik sunmaktadır.",
"langKorean": "Korece",
"unifiedCanvas": "Akıllı Tuval",
"nodesDesc": "Görüntülerin oluşturulmasında hazırladığımız yeni bir sistem geliştirme aşamasındadır. Bu harika özellikler ve çok daha fazlası için bizi takip etmeye devam edin.",
"postProcessDesc1": "Invoke AI son kullanıcıya yönelik bir çok özellik sunar. Görüntü kalitesi yükseltme, yüz restorasyonu WebUI üzerinden kullanılabilir. Metinden resime ve resimden metne araçlarına gelişmiş seçenekler menüsünden ulaşabilirsiniz. İsterseniz mevcut görüntü ekranının üzerindeki veya görüntüleyicideki görüntüyü doğrudan düzenleyebilirsiniz."
"unifiedCanvas": "Tuval",
"nodesDesc": "Görsel oluşturmaya yardımcı çizge tabanlı sistem şimdilik geliştirme aşamasındadır. Bu süper özellik hakkındaki gelişmeler için kulağınız bizde olsun.",
"postProcessDesc1": "Invoke AI birçok rötuş (post-process) aracı sağlar. Görsel büyütme ve yüz iyileştirme WebUI üzerinden kullanıma uygun durumdadır. Bunlara Yazıdan Görsel ve Görselden Görsel sekmelerindeki Gelişmiş Ayarlar menüsünden ulaşabilirsiniz. Ayrıca var olan görseli üzerindeki düğmeler yardımıyla düzenleyebilirsiniz.",
"batch": "Toplu İş Yöneticisi",
"accept": "Onayla",
"cancel": "Vazgeç",
"advanced": "Gelişmiş",
"copyError": "$t(gallery.copy) Hata",
"on": "Açık",
"or": "ya da",
"aboutDesc": "Invoke'u iş için mi kullanıyorsunuz? Şuna bir göz atın:",
"advancedOptions": "Gelişmiş Ayarlar",
"ai": "yapay zeka",
"close": "Kapat",
"auto": "Otomatik",
"communityLabel": "Topluluk",
"back": "Geri",
"areYouSure": "Emin misiniz?",
"notInstalled": "$t(common.installed) Değil",
"openInNewTab": "Yeni Sekmede Aç",
"aboutHeading": "Yaratıcı Gücünüzün Sahibi Olun",
"lightMode": "Açık Tema",
"load": "Yükle",
"loading": "Yükleniyor",
"loadingInvokeAI": "Invoke AI Yükleniyor",
"localSystem": "Yerel Sistem",
"inpaint": "içboyama",
"modelManager": "Model Yöneticisi",
"orderBy": "Sırala",
"outpaint": "dışboyama",
"outputs": ıktılar",
"langHebrew": "İbranice",
"learnMore": "Bilgi Edin",
"nodeEditor": "Çizge Düzenleyici",
"save": "Kaydet",
"statusMergingModels": "Modeller Birleştiriliyor",
"statusGenerating": "Oluşturuluyor",
"statusGenerationComplete": "Oluşturma Bitti",
"statusGeneratingOutpainting": "Dışboyama Oluşturuluyor",
"statusLoadingModel": "Model Yükleniyor",
"random": "Rastgele",
"simple": "Basit",
"preferencesLabel": "Seçenekler",
"statusConnected": "Bağlandı",
"statusMergedModels": "Modeller Birleştirildi",
"statusModelChanged": "Model Değişti",
"statusModelConverted": "Model Dönüştürüldü",
"statusPreparing": "Hazırlanıyor",
"statusProcessing": "İşleniyor",
"statusProcessingCanceled": "İşlemden Vazgeçildi",
"statusRestoringFacesCodeFormer": "Yüzler İyileştiriliyor (CodeFormer)",
"statusRestoringFacesGFPGAN": "Yüzler İyileştiriliyor (GFPGAN)",
"template": "Şablon",
"saveAs": "Farklı Kaydet",
"statusProcessingComplete": "İşlem Bitti",
"statusSavingImage": "Görsel Kaydediliyor",
"somethingWentWrong": "Bir sorun oluştu",
"statusConvertingModel": "Model Dönüştürülüyor",
"statusDisconnected": "Bağlantı Kesildi",
"statusError": "Hata",
"statusGeneratingImageToImage": "Görselden Görsel Oluşturuluyor",
"statusGeneratingInpainting": "İçboyama Oluşturuluyor",
"statusRestoringFaces": "Yüzler İyileştiriliyor",
"statusUpscaling": "Büyütme",
"statusUpscalingESRGAN": "Büyütme (ESRGAN)",
"training": "Eğitim",
"statusGeneratingTextToImage": "Yazıdan Görsel Oluşturuluyor",
"imagePrompt": "Görsel İstemi",
"unknown": "Bilinmeyen",
"green": "Yeşil",
"red": "Kırmızı",
"blue": "Mavi",
"alpha": "Alfa",
"file": "Dosya",
"folder": "Klasör",
"format": "biçim",
"details": "Ayrıntılar",
"error": "Hata",
"generate": "Oluştur",
"free": "Serbest",
"imageFailedToLoad": "Görsel Yüklenemedi",
"safetensors": "Safetensors",
"upload": "Yükle",
"nextPage": "Sonraki Sayfa",
"prevPage": "Önceki Sayfa",
"dontAskMeAgain": "Bir daha sorma",
"delete": "Kaldır",
"direction": "Yön",
"darkMode": "Koyu Tema",
"unsaved": "Kaydedilmemiş",
"unknownError": "Bilinmeyen Hata",
"installed": "Yüklü",
"data": "Veri",
"input": "Giriş",
"copy": "Kopyala",
"created": "Yaratma",
"updated": "Güncelleme"
},
"accordions": {
"generation": {
"title": "Oluşturma",
"modelTab": "Model",
"conceptsTab": "Kavramlar"
},
"image": {
"title": "Görsel"
},
"advanced": {
"title": "Gelişmiş"
},
"compositing": {
"title": "Birleştirme",
"coherenceTab": "Uyum Geçişi",
"infillTab": "Doldurma"
},
"control": {
"ipTab": "Görsel İstemleri"
}
},
"boards": {
"autoAddBoard": "Panoya Otomatik Ekleme",
"cancel": "Vazgeç",
"clearSearch": "Aramayı Sil",
"deleteBoard": "Panoyu Sil",
"loading": "Yükleniyor...",
"myBoard": "Panom",
"selectBoard": "Bir Pano Seç",
"addBoard": "Pano Ekle",
"deleteBoardAndImages": "Panoyu ve Görselleri Sil",
"deleteBoardOnly": "Sadece Panoyu Sil",
"deletedBoardsCannotbeRestored": "Silinen panolar geri getirilemez",
"menuItemAutoAdd": "Bu panoya otomatik olarak ekle",
"move": "Taşı",
"movingImagesToBoard_one": "{{count}} görseli şu panoya taşı:",
"movingImagesToBoard_other": "{{count}} görseli şu panoya taşı:",
"noMatching": "Eşleşen pano yok",
"searchBoard": "Pano Ara...",
"topMessage": "Bu pano, şuralarda kullanılan görseller içeriyor:",
"downloadBoard": "Panoyu İndir",
"uncategorized": "Kategorisiz",
"changeBoard": "Panoyu Değiştir",
"bottomMessage": "Bu panoyu ve görselleri silmek, bunları kullanan özelliklerin resetlemesine neden olacaktır."
},
"controlnet": {
"balanced": "Dengeli",
"contentShuffle": "İçerik Karıştırma",
"contentShuffleDescription": "Görselin içeriğini karıştırır",
"depthZoe": "Derinlik (Zoe)",
"depthZoeDescription": "Zoe kullanarak derinlik haritası oluşturma",
"resizeMode": "Boyutlandırma Kipi",
"addControlNet": "$t(common.controlNet) Ekle",
"addIPAdapter": "$t(common.ipAdapter) Ekle",
"addT2IAdapter": "$t(common.t2iAdapter) Ekle",
"controlNetEnabledT2IDisabled": "$t(common.controlNet) etkin, $t(common.t2iAdapter)s etkin değil",
"t2iEnabledControlNetDisabled": "$t(common.t2iAdapter) etkin, $t(common.controlNet)s etkin değil",
"colorMap": "Renk",
"crop": "Kırpma",
"delete": "Kaldır",
"depthMidas": "Derinlik (Midas)",
"depthMidasDescription": "Midas kullanarak derinlik haritası oluşturma",
"detectResolution": "Çözünürlüğü Bul",
"none": "Hiçbiri",
"noneDescription": "Hiçbir işlem uygulanmamış",
"selectModel": "Model seçin",
"showAdvanced": "Gelişmiş Ayarları Göster",
"controlNetT2IMutexDesc": "$t(common.controlNet) ve $t(common.t2iAdapter)'nün birlikte kullanımı şimdilik desteklenmiyor.",
"canny": "Canny",
"colorMapDescription": "Görselden renk haritası oluşturur",
"handAndFace": "El ve Yüz",
"processor": "İşlemci",
"prompt": "İstem",
"duplicate": "Kopyala",
"large": "Büyük",
"modelSize": "Model Boyutu",
"resize": "Boyutlandır",
"resizeSimple": "Boyutlandır (Basit)",
"safe": "Güvenli",
"small": "Küçük",
"weight": "Etki",
"cannyDescription": "Canny kenar algılama",
"fill": "Doldur",
"highThreshold": "Üst Eşik",
"imageResolution": "Görsel Çözünürlüğü",
"colorMapTileSize": "Karo Boyutu",
"importImageFromCanvas": "Tuvaldeki Görseli Al",
"importMaskFromCanvas": "Tuvalden Maskeyi İçe Aktar",
"lowThreshold": "Alt Eşik",
"base": "Taban",
"depthAnythingDescription": "Depth Anything yöntemi ile derinlik haritası oluşturma"
},
"queue": {
"queuedCount": "{{pending}} Sırada",
"resumeSucceeded": "İşlem Sürdürüldü",
"openQueue": "Sırayı Göster",
"cancelSucceeded": "İş Geri Çekildi",
"cancelFailed": "İşi Geri Çekmede Sorun",
"prune": "Arındır",
"pruneTooltip": "{{item_count}} Bitmiş İşi Sil",
"resumeFailed": "İşlemi Sürdürmede Sorun",
"pauseFailed": "İşlemi Duraklatmada Sorun",
"cancelBatchSucceeded": "Toplu İşten Vazgeçildi",
"pruneSucceeded": "{{item_count}} Bitmiş İş Sıradan Silindi",
"in_progress": "İşleniyor",
"completed": "Bitti",
"canceled": "Vazgeçildi",
"back": "arka",
"queueFront": "Sıranın Başına Ekle",
"queueBack": "Sıraya Ekle",
"resumeTooltip": "İşlemi Sürdür",
"clearQueueAlertDialog2": "Sırayı boşaltmak istediğinizden emin misiniz?",
"batchQueuedDesc_one": "{{count}} iş sıranın {{direction}} eklendi",
"batchQueuedDesc_other": "{{count}} iş sıranın {{direction}} eklendi",
"batchFailedToQueue": "Toplu İş Sıraya Alınamadı",
"front": "ön",
"queue": "Sıra",
"resume": "Sürdür",
"queueTotal": "Toplam {{total}}",
"queueEmpty": "Sıra Boş",
"clearQueueAlertDialog": "Sırayı boşaltma düğmesi geçerli işlemi durdurur ve sırayı boşaltır.",
"current": "Şimdiki",
"time": "Süre",
"pause": "Duraklat",
"pauseTooltip": "İşlemi Duraklat",
"pruneFailed": "Sırayı Arındırmada Sorun",
"clearTooltip": "Vazgeç ve Tüm İşleri Sil",
"clear": "Boşalt",
"cancelBatchFailed": "Toplu İşten Vazgeçmede Sorun",
"next": "Sonraki",
"status": "Durum",
"failed": "Başarısız",
"item": "İş",
"enqueueing": "Toplu İş Sıraya Alınıyor",
"pauseSucceeded": "İşlem Duraklatıldı",
"cancel": "Vazgeç",
"cancelTooltip": "Bu İşi Geri Çek",
"clearSucceeded": "Sıra Boşaltıldı",
"clearFailed": "Sırayı Boşaltmada Sorun",
"cancelBatch": "Toplu İşten Vazgeç",
"cancelItem": "İşi Geri Çek",
"total": "Toplam",
"pending": "Sırada",
"completedIn": "'de bitirildi",
"batch": "Toplu İş",
"session": "Oturum",
"batchQueued": "Toplu İş Sıraya Alındı",
"notReady": "Sıraya Alınamadı",
"batchFieldValues": "Toplu İş Değişkenleri",
"queueMaxExceeded": "Sıra sınırı {{max_queue_size}} aşıldı, {{skip}} atlanıyor"
},
"invocationCache": {
"cacheSize": "Önbellek Boyutu",
"disable": "Kapat",
"clear": "Boşalt",
"maxCacheSize": "Maksimum Önbellek Boyutu",
"useCache": "Önbellek Kullan",
"enable": "Aç"
},
"gallery": {
"deleteImageBin": "Silinen görseller işletim sisteminin çöp kutusuna gönderilir.",
"deleteImagePermanent": "Silinen görseller geri getirilemez.",
"assets": "Özkaynaklar",
"autoAssignBoardOnClick": "Tıklanan Panoya Otomatik Atama",
"loading": "Yükleniyor",
"starImage": "Yıldız Koy",
"download": "İndir",
"deleteSelection": "Seçileni Sil",
"preparingDownloadFailed": "İndirme Hazırlanırken Sorun",
"problemDeletingImages": "Görsel Silmede Sorun",
"featuresWillReset": "Bu görseli silerseniz, o özellikler resetlenecektir.",
"galleryImageResetSize": "Boyutu Resetle",
"noImageSelected": "Görsel Seçilmedi",
"unstarImage": "Yıldızı Kaldır",
"uploads": "Yüklemeler",
"problemDeletingImagesDesc": "Bir ya da daha çok görsel silinemedi",
"gallerySettings": "Galeri Ayarları",
"image": "görsel",
"galleryImageSize": "Görsel Boyutu",
"allImagesLoaded": "Tüm Görseller Yüklendi",
"copy": "Kopyala",
"noImagesInGallery": "Gösterilecek Görsel Yok",
"autoSwitchNewImages": "Yeni Görseli Biter Bitmez Gör",
"maintainAspectRatio": "En-Boy Oranını Koru",
"currentlyInUse": "Bu görsel şurada kullanımda:",
"deleteImage": "Görseli Sil",
"loadMore": "Daha Getir",
"setCurrentImage": "Çalışma Görseli Yap",
"unableToLoad": "Galeri Yüklenemedi",
"downloadSelection": "Seçileni İndir",
"preparingDownload": "İndirmeye Hazırlanıyor",
"singleColumnLayout": "Tek Sütun Düzen",
"generations": ıktılar",
"showUploads": "Yüklenenleri Göster",
"showGenerations": ıktıları Göster"
},
"hrf": {
"hrf": "Yüksek Çözünürlük Kürü",
"enableHrf": "Yüksek Çözünürlük Kürünü Aç",
"hrfStrength": "Yüksek Çözünürlük Kürü Etkisi",
"strengthTooltip": "Düşük değerler daha az detaya neden olsa da olası bozuklukları önleyebilir.",
"metadata": {
"enabled": "Yüksek Çözünürlük Kürü Açık",
"strength": "Yüksek Çözünürlük Kürü Etkisi",
"method": "Yüksek Çözünürlük Kürü Yöntemi"
},
"upscaleMethod": "Büyütme Yöntemi",
"enableHrfTooltip": "Daha düşük bir çözünürlükle oluşturmaya başlar, ana çözünürlüğe büyütür ve Görselden Görsel'i çalıştırır."
},
"hotkeys": {
"noHotkeysFound": "Kısayol Tuşu Bulanamadı",
"searchHotkeys": "Kısayol Tuşlarında Ara",
"clearSearch": "Aramayı Sil",
"colorPicker": {
"title": "Renk Seçici",
"desc": "Tuvalde renk seçiciye geçer"
},
"consoleToggle": {
"title": "Konsolu Aç-Kapat",
"desc": "Konsolu aç-kapat"
},
"hideMask": {
"desc": "Maskeyi gizle-göster",
"title": "Maskeyi Gizle"
},
"focusPrompt": {
"title": "İsteme Odaklan",
"desc": "Görsel istemi alanına odaklanır"
},
"keyboardShortcuts": "Kısayol Tuşları",
"nextImage": {
"title": "Sonraki Görsel",
"desc": "Galerideki sonraki görseli göster"
},
"maximizeWorkSpace": {
"desc": "Panelleri kapat ve çalışma alanını genişlet",
"title": "Çalışma Alanını Genişlet"
},
"pinOptions": {
"desc": "Ayar panelini iğnele",
"title": "Ayarları İğnele"
},
"nodesHotkeys": "Çizgeler",
"quickToggleMove": {
"desc": "Geçici olarak Kayma Aracına geçer",
"title": "Geçici Kayma"
},
"showHideBoundingBox": {
"title": "Sınırlayıcı Kutuyu Gizle/Göster",
"desc": "Sınırlayıcı kutunun görünürlüğünü değiştir"
},
"showInfo": {
"desc": "Seçili görselin üstverisini göster",
"title": "Bilgileri Göster"
},
"nextStagingImage": {
"desc": "Sonraki Görsel Parçayı Göster",
"title": "Sonraki Görsel Parça"
},
"acceptStagingImage": {
"desc": "Geçiçi Görsel Parçasını Onayla",
"title": "Geçiçi Görsel Parçasını Onayla"
},
"changeTabs": {
"desc": "Çalışma alanını değiştir",
"title": "Sekmeyi değiştir"
},
"closePanels": {
"title": "Panelleri Kapat",
"desc": "Açık panelleri kapat"
},
"decreaseBrushOpacity": {
"title": "Fırça Saydamlığını Artır",
"desc": "Tuval fırçasının saydamlığını artırır"
},
"clearMask": {
"title": "Maskeyi Sil",
"desc": "Tüm maskeyi sil"
},
"decreaseGalleryThumbSize": {
"desc": "Galerideki küçük görsel boyutunu düşürür",
"title": "Küçük Görsel Boyutunu Düşür"
},
"deleteImage": {
"desc": "Seçili görseli sil",
"title": "Görseli Sil"
},
"invoke": {
"desc": "Görsel Oluştur",
"title": "Invoke"
},
"increaseGalleryThumbSize": {
"title": "Küçük Görsel Boyutunu Artır",
"desc": "Galerideki küçük görsel boyutunu artırır"
},
"setParameters": {
"title": "Değişkenleri Kullan",
"desc": "Seçili görselin tüm değişkenlerini kullan"
},
"setPrompt": {
"desc": "Seçili görselin istemini kullan",
"title": "İstemi Kullan"
},
"toggleLayer": {
"desc": "Maske/Taban katmanları arasında geçiş yapar",
"title": "Katmanı Gizle-Göster"
},
"upscale": {
"title": "Büyüt",
"desc": "Seçili görseli büyüt"
},
"setSeed": {
"title": "Tohumu Kullan",
"desc": "Seçili görselin tohumunu kullan"
},
"appHotkeys": "Uygulama",
"cancel": {
"desc": "Geçerli İşi Sil",
"title": "Vazgeç"
},
"sendToImageToImage": {
"title": "Görselden Görsel'e Gönder",
"desc": "Seçili görseli Görselden Görsel'e gönder"
},
"fillBoundingBox": {
"title": "Sınırlayıcı Kutuyu Doldur",
"desc": "Sınırlayıcı kutuyu fırçadaki renkle doldurur"
},
"moveTool": {
"desc": "Tuvalde kaymayı sağlar",
"title": "Kayma Aracı"
},
"redoStroke": {
"desc": "Fırça vuruşunu yinele",
"title": "Vuruşu Yinele"
},
"increaseBrushOpacity": {
"title": "Fırçanın Saydamlığını Düşür",
"desc": "Tuval fırçasının saydamlığını düşürür"
},
"selectEraser": {
"desc": "Tuval silgisini kullan",
"title": "Silgiyi Kullan"
},
"toggleOptions": {
"desc": "Ayarlar panelini aç-kapat",
"title": "Ayarları Aç-Kapat"
},
"copyToClipboard": {
"desc": "Tuval içeriğini kopyala",
"title": "Kopyala"
},
"galleryHotkeys": "Galeri",
"generalHotkeys": "Genel",
"mergeVisible": {
"desc": "Tuvalin görünür tüm katmanlarını birleştir",
"title": "Katmanları Birleştir"
},
"toggleGallery": {
"title": "Galeriyi Aç-Kapat",
"desc": "Galeri panelini aç-kapat"
},
"downloadImage": {
"title": "Görseli İndir",
"desc": "Tuval içeriğini indir"
},
"previousStagingImage": {
"title": "Önceki Görsel Parça",
"desc": "Önceki Görsel Parçayı Göster"
},
"increaseBrushSize": {
"title": "Fırça Boyutunu Artır",
"desc": "Tuval fırçasının/silgisinin boyutunu artırır"
},
"previousImage": {
"desc": "Galerideki önceki görseli göster",
"title": "Önceki Görsel"
},
"toggleOptionsAndGallery": {
"title": "Ayarları ve Galeriyi Aç-Kapat",
"desc": "Ayarlar ve galeri panellerini aç-kapat"
},
"toggleSnap": {
"desc": "Kılavuza Uydur",
"title": "Kılavuza Uydur"
},
"resetView": {
"desc": "Tuval Görüşünü Resetle",
"title": "Görüşü Resetle"
},
"cancelAndClear": {
"desc": "Geçerli işi geri çek ve sıradaki tüm işleri sil",
"title": "Vazgeç ve Sil"
},
"decreaseBrushSize": {
"title": "Fırça Boyutunu Düşür",
"desc": "Tuval fırçasının/silgisinin boyutunu düşürür"
},
"resetOptionsAndGallery": {
"desc": "Ayarlar ve galeri panellerini resetler",
"title": "Ayarları ve Galeriyi Resetle"
},
"remixImage": {
"desc": "Seçili görselin tohumu hariç tüm değişkenlerini kullan",
"title": "Benzerini Türet"
},
"undoStroke": {
"title": "Vuruşu Geri Al",
"desc": "Fırça vuruşunu geri al"
},
"saveToGallery": {
"title": "Galeriye Gönder",
"desc": "Tuval içeriğini galeriye gönder"
},
"unifiedCanvasHotkeys": "Tuval",
"addNodes": {
"desc": "Çizge ekleme menüsünü açar",
"title": "Çizge Ekle"
},
"eraseBoundingBox": {
"desc": "Sınırlayıcı kutunun içini boşaltır",
"title": "Sınırlayıcı Kutuyu Boşalt"
},
"selectBrush": {
"desc": "Tuval fırçasını kullan",
"title": "Fırçayı Kullan"
}
},
"embedding": {
"incompatibleModel": "Uyumsuz ana model:"
},
"unifiedCanvas": {
"accept": "Onayla",
"emptyTempImagesFolderMessage": "Geçici görsel klasörünü boşaltmak Tuvali resetler. Yineleme ve geri alma geçmişi, görsel parçası bölümü ve tuval taban katmanı da dolayısıla resetlenir.",
"clearCanvasHistoryMessage": "Tuval geçmişini silmek tuvale dokunmaz, ancak yineleme ve geri alma geçmişini geri dönülemez bir biçimde siler."
},
"nodes": {
"unableToValidateWorkflow": "İş Akışı Doğrulanamadı",
"workflowContact": "İletişim",
"loadWorkflow": "İş Akışı Yükle",
"workflowNotes": "Notlar",
"workflow": "İş Akışı",
"notesDescription": "İş akışınız hakkında not düşün",
"workflowTags": "Etiketler",
"workflowDescription": "Kısa Tanım",
"workflowValidation": "İş Akışı Doğrulama Sorunu",
"workflowVersion": "Sürüm",
"newWorkflow": "Yeni İş Akışı",
"currentImageDescription": "İşlemdeki görseli Çizge Düzenleyicide gösterir",
"workflowAuthor": "Yaratıcı",
"workflowName": "Ad",
"workflowSettings": "İş Akışı Düzenleyici Ayarları",
"currentImage": "İşlemdeki Görsel",
"noWorkflow": "İş Akışı Yok",
"newWorkflowDesc": "Yeni iş akışı?",
"problemReadingWorkflow": "Görselden iş akışı çağrılamadı",
"downloadWorkflow": "İş Akışını İndir (JSON)",
"unableToMigrateWorkflow": "İş Akışı Aktarılamadı",
"unknownErrorValidatingWorkflow": "İş akışını doğrulamada bilinmeyen bir sorun",
"unableToGetWorkflowVersion": "İş akışı sürümüne ulaşılamadı",
"unrecognizedWorkflowVersion": "Tanınmayan iş akışı sürümü {{version}}",
"newWorkflowDesc2": "Geçerli iş akışında kaydedilmemiş değişiklikler var.",
"unableToLoadWorkflow": "İş Akışı Yüklenemedi"
},
"workflows": {
"searchWorkflows": "İş Akışlarında Ara",
"workflowName": "İş Akışı Adı",
"problemSavingWorkflow": "İş Akışını Kaydetmede Sorun",
"saveWorkflow": "İş Akışını Kaydet",
"uploadWorkflow": "Dosyadan Yükle",
"newWorkflowCreated": "Yeni İş Akışı Yaratıldı",
"problemLoading": "İş Akışlarını Yüklemede Sorun",
"loading": "İş Akışları Yükleniyor",
"noDescription": "Tanımsız",
"workflowIsOpen": "İş Akışıık",
"clearWorkflowSearchFilter": "İş Akışı Aramasını Resetle",
"workflowEditorMenu": "İş Akışı Düzenleyici Menüsü",
"downloadWorkflow": "İndir",
"saveWorkflowAs": "İş Akışını Farklı Kaydet",
"savingWorkflow": "İş Akışı Kaydediliyor...",
"userWorkflows": "İş Akışlarım",
"defaultWorkflows": "Varsayılan İş Akışları",
"workflows": "İş Akışları",
"workflowLibrary": "Depo",
"deleteWorkflow": "İş Akışını Sil",
"unnamedWorkflow": "Adsız İş Akışı",
"noWorkflows": "İş Akışı Yok",
"workflowSaved": "İş Akışı Kaydedildi"
},
"toast": {
"problemDownloadingCanvasDesc": "Taban katman indirilemedi",
"problemSavingMaskDesc": "Maske kaydedilemedi",
"problemSavingCanvasDesc": "Taban katman kaydedilemedi",
"problemRetrievingWorkflow": "İş Akışını Getirmede Sorun",
"workflowDeleted": "İş Akışı Silindi",
"loadedWithWarnings": "İş Akışı Yüklendi Ancak Uyarılar Var",
"problemImportingMaskDesc": "Maske aktarılamadı",
"problemMergingCanvasDesc": "Taban katman aktarılamadı",
"problemCopyingCanvasDesc": "Taban katman aktarılamadı",
"workflowLoaded": "İş Akışı Yüklendi",
"problemDeletingWorkflow": "İş Akışını Silmede Sorun"
},
"parameters": {
"invoke": {
"noPrompts": "İstem oluşturulmadı"
}
},
"modelManager": {
"baseModel": "Ana Model"
},
"dynamicPrompts": {
"loading": "Devimsel İstemler Oluşturuluyor...",
"combinatorial": "Birleşimsel Oluşturma"
},
"models": {
"incompatibleBaseModel": "Uyumsuz ana model"
},
"settings": {
"generation": "Oluşturma"
}
}

View File

@ -6,9 +6,7 @@ const OPENAPI_URL = 'http://127.0.0.1:9090/openapi.json';
const OUTPUT_FILE = 'src/services/api/schema.ts';
async function main() {
process.stdout.write(
`Generating types "${OPENAPI_URL}" --> "${OUTPUT_FILE}"...`
);
process.stdout.write(`Generating types "${OPENAPI_URL}" --> "${OUTPUT_FILE}"...`);
const types = await openapiTS(OPENAPI_URL, {
exportType: true,
transform: (schemaObject) => {

View File

@ -1,4 +1,4 @@
import { Box, useGlobalModifiersInit } from '@invoke-ai/ui';
import { Box, useGlobalModifiersInit } from '@invoke-ai/ui-library';
import { useSocketIO } from 'app/hooks/useSocketIO';
import { useLogger } from 'app/logging/useLogger';
import { appStarted } from 'app/store/middleware/listenerMiddleware/listeners/appStarted';
@ -45,8 +45,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
useGlobalModifiersInit();
useGlobalHotkeys();
const { dropzone, isHandlingUpload, setIsHandlingUpload } =
useFullscreenDropzone();
const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();
const handleReset = useCallback(() => {
clearStorage();
@ -70,10 +69,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
}, [dispatch]);
return (
<ErrorBoundary
onReset={handleReset}
FallbackComponent={AppErrorBoundaryFallback}
>
<ErrorBoundary onReset={handleReset} FallbackComponent={AppErrorBoundaryFallback}>
<Box
id="invoke-app-wrapper"
w="100vw"
@ -86,10 +82,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
<InvokeTabs />
<AnimatePresence>
{dropzone.isDragActive && isHandlingUpload && (
<ImageUploadOverlay
dropzone={dropzone}
setIsHandlingUpload={setIsHandlingUpload}
/>
<ImageUploadOverlay dropzone={dropzone} setIsHandlingUpload={setIsHandlingUpload} />
)}
</AnimatePresence>
</Box>

View File

@ -1,12 +1,8 @@
import { Button, Flex, Heading, Link, Text, useToast } from '@invoke-ai/ui';
import { Button, Flex, Heading, Link, Text, useToast } from '@invoke-ai/ui-library';
import newGithubIssueUrl from 'new-github-issue-url';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import {
PiArrowCounterClockwiseBold,
PiArrowSquareOutBold,
PiCopyBold,
} from 'react-icons/pi';
import { PiArrowCounterClockwiseBold, PiArrowSquareOutBold, PiCopyBold } from 'react-icons/pi';
import { serializeError } from 'serialize-error';
type Props = {
@ -37,22 +33,8 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
[error.message, error.name]
);
return (
<Flex
layerStyle="body"
w="100vw"
h="100vh"
alignItems="center"
justifyContent="center"
p={4}
>
<Flex
layerStyle="first"
flexDir="column"
borderRadius="base"
justifyContent="center"
gap={8}
p={16}
>
<Flex layerStyle="body" w="100vw" h="100vh" alignItems="center" justifyContent="center" p={4}>
<Flex layerStyle="first" flexDir="column" borderRadius="base" justifyContent="center" gap={8} p={16}>
<Heading>{t('common.somethingWentWrong')}</Heading>
<Flex
layerStyle="second"
@ -68,19 +50,14 @@ const AppErrorBoundaryFallback = ({ error, resetErrorBoundary }: Props) => {
</Text>
</Flex>
<Flex gap={4}>
<Button
leftIcon={<PiArrowCounterClockwiseBold />}
onClick={resetErrorBoundary}
>
<Button leftIcon={<PiArrowCounterClockwiseBold />} onClick={resetErrorBoundary}>
{t('accessibility.resetUI')}
</Button>
<Button leftIcon={<PiCopyBold />} onClick={handleCopy}>
{t('common.copyError')}
</Button>
<Link href={url} isExternal>
<Button leftIcon={<PiArrowSquareOutBold />}>
{t('accessibility.createIssue')}
</Button>
<Button leftIcon={<PiArrowSquareOutBold />}>{t('accessibility.createIssue')}</Button>
</Link>
</Flex>
</Flex>

View File

@ -10,13 +10,16 @@ import { $customStarUI } from 'app/store/nanostores/customStarUI';
import { $galleryHeader } from 'app/store/nanostores/galleryHeader';
import { $isDebugging } from 'app/store/nanostores/isDebugging';
import { $logo } from 'app/store/nanostores/logo';
import { $openAPISchemaUrl } from 'app/store/nanostores/openAPISchemaUrl';
import { $projectId } from 'app/store/nanostores/projectId';
import { $queueId, DEFAULT_QUEUE_ID } from 'app/store/nanostores/queueId';
import { $store } from 'app/store/nanostores/store';
import { $workflowCategories } from 'app/store/nanostores/workflowCategories';
import { createStore } from 'app/store/store';
import type { PartialAppConfig } from 'app/types/invokeai';
import Loading from 'common/components/Loading/Loading';
import AppDndContext from 'features/dnd/components/AppDndContext';
import type { WorkflowCategory } from 'features/nodes/types/workflow';
import type { PropsWithChildren, ReactNode } from 'react';
import React, { lazy, memo, useEffect, useMemo } from 'react';
import { Provider } from 'react-redux';
@ -28,6 +31,7 @@ const ThemeLocaleProvider = lazy(() => import('./ThemeLocaleProvider'));
interface Props extends PropsWithChildren {
apiUrl?: string;
openAPISchemaUrl?: string;
token?: string;
config?: PartialAppConfig;
customNavComponent?: ReactNode;
@ -43,10 +47,12 @@ interface Props extends PropsWithChildren {
socketOptions?: Partial<ManagerOptions & SocketOptions>;
isDebugging?: boolean;
logo?: ReactNode;
workflowCategories?: WorkflowCategory[];
}
const InvokeAIUI = ({
apiUrl,
openAPISchemaUrl,
token,
config,
customNavComponent,
@ -59,6 +65,7 @@ const InvokeAIUI = ({
socketOptions,
isDebugging = false,
logo,
workflowCategories,
}: Props) => {
useEffect(() => {
// configure API client token
@ -123,6 +130,16 @@ const InvokeAIUI = ({
};
}, [customNavComponent]);
useEffect(() => {
if (openAPISchemaUrl) {
$openAPISchemaUrl.set(openAPISchemaUrl);
}
return () => {
$openAPISchemaUrl.set(undefined);
};
}, [openAPISchemaUrl]);
useEffect(() => {
if (galleryHeader) {
$galleryHeader.set(galleryHeader);
@ -143,6 +160,16 @@ const InvokeAIUI = ({
};
}, [logo]);
useEffect(() => {
if (workflowCategories) {
$workflowCategories.set(workflowCategories);
}
return () => {
$workflowCategories.set([]);
};
}, [workflowCategories]);
useEffect(() => {
if (socketOptions) {
$socketOptions.set(socketOptions);

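Each of these effects follows the same prop-to-nanostore sync pattern: write the prop into a module-level atom when it is provided, and reset the atom on cleanup so a remounted host never observes stale values. A minimal sketch of the pattern, with an illustrative string[] store standing in for $workflowCategories:

import { atom } from 'nanostores';
import { useEffect } from 'react';

// Module-level store, readable anywhere without prop drilling.
const $categories = atom<string[]>([]);

const useSyncCategories = (categories?: string[]) => {
  useEffect(() => {
    if (categories) {
      $categories.set(categories);
    }
    // Reset on unmount so the next mount starts from the default.
    return () => {
      $categories.set([]);
    };
  }, [categories]);
};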
View File

@ -1,13 +1,7 @@
import '@fontsource-variable/inter';
import 'overlayscrollbars/overlayscrollbars.css';
import {
ChakraProvider,
DarkMode,
extendTheme,
theme as _theme,
TOAST_OPTIONS,
} from '@invoke-ai/ui';
import { ChakraProvider, DarkMode, extendTheme, theme as _theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import type { ReactNode } from 'react';
import { memo, useEffect, useMemo } from 'react';
import { useTranslation } from 'react-i18next';

View File

@ -1,4 +1,4 @@
import { useToast } from '@invoke-ai/ui';
import { useToast } from '@invoke-ai/ui-library';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import { addToast, clearToastQueue } from 'features/system/store/systemSlice';
import type { MakeToastArg } from 'features/system/util/makeToast';
@ -36,10 +36,7 @@ const Toaster = () => {
*/
export const useAppToaster = () => {
const dispatch = useAppDispatch();
const toaster = useCallback(
(arg: MakeToastArg) => dispatch(addToast(makeToast(arg))),
[dispatch]
);
const toaster = useCallback((arg: MakeToastArg) => dispatch(addToast(makeToast(arg))), [dispatch]);
return toaster;
};

View File

@ -6,10 +6,7 @@ import { useAppDispatch } from 'app/store/storeHooks';
import type { MapStore } from 'nanostores';
import { atom, map } from 'nanostores';
import { useEffect, useMemo } from 'react';
import type {
ClientToServerEvents,
ServerToClientEvents,
} from 'services/events/types';
import type { ClientToServerEvents, ServerToClientEvents } from 'services/events/types';
import { setEventListeners } from 'services/events/util/setEventListeners';
import type { ManagerOptions, Socket, SocketOptions } from 'socket.io-client';
import { io } from 'socket.io-client';
@ -45,7 +42,7 @@ export const useSocketIO = () => {
const socketOptions = useMemo(() => {
const options: Partial<ManagerOptions & SocketOptions> = {
timeout: 60000,
path: '/ws/socket.io',
path: baseUrl ? '/ws/socket.io' : `${window.location.pathname}ws/socket.io`,
autoConnect: false, // warning! removing this breaks the dynamic middleware
forceNew: true,
};
@ -56,7 +53,7 @@ export const useSocketIO = () => {
}
return { ...options, ...addlSocketOptions };
}, [authToken, addlSocketOptions]);
}, [authToken, addlSocketOptions, baseUrl]);
useEffect(() => {
if ($isSocketInitialized.get()) {
@ -64,10 +61,7 @@ export const useSocketIO = () => {
return;
}
const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(
socketUrl,
socketOptions
);
const socket: Socket<ServerToClientEvents, ClientToServerEvents> = io(socketUrl, socketOptions);
setEventListeners({ dispatch, socket });
socket.connect();

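The functional change in this hunk is the socket path: when no explicit baseUrl is configured, the path is derived from window.location.pathname so the client still reaches the server when the app is served from a subpath. A standalone sketch of that derivation (the helper name is hypothetical):

// Hypothetical helper mirroring the ternary in the hunk above.
const getSocketPath = (baseUrl?: string): string =>
  baseUrl ? '/ws/socket.io' : `${window.location.pathname}ws/socket.io`;

// Served at https://host/invoke/ with no baseUrl -> '/invoke/ws/socket.io'
// With an explicit baseUrl                       -> '/ws/socket.io'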
View File

@ -30,20 +30,11 @@ export type LoggerNamespace =
| 'queue'
| 'dnd';
export const logger = (namespace: LoggerNamespace) =>
$logger.get().child({ namespace });
export const logger = (namespace: LoggerNamespace) => $logger.get().child({ namespace });
export const zLogLevel = z.enum([
'trace',
'debug',
'info',
'warn',
'error',
'fatal',
]);
export const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']);
export type LogLevel = z.infer<typeof zLogLevel>;
export const isLogLevel = (v: unknown): v is LogLevel =>
zLogLevel.safeParse(v).success;
export const isLogLevel = (v: unknown): v is LogLevel => zLogLevel.safeParse(v).success;
// Translate human-readable log levels to numbers, used for log filtering
export const LOG_LEVEL_MAP: Record<LogLevel, number> = {

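The isLogLevel guard is useful for narrowing values read from untrusted sources such as localStorage before they reach the logger. A self-contained usage sketch (the 'info' fallback is illustrative):

import { z } from 'zod';

const zLogLevel = z.enum(['trace', 'debug', 'info', 'warn', 'error', 'fatal']);
type LogLevel = z.infer<typeof zLogLevel>;
const isLogLevel = (v: unknown): v is LogLevel => zLogLevel.safeParse(v).success;

// Narrow an unknown value before using it as a log level.
const raw: unknown = localStorage.getItem('consoleLogLevel');
const level: LogLevel = isLogLevel(raw) ? raw : 'info';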
View File

@ -17,10 +17,7 @@ export const useLogger = (namespace: LoggerNamespace) => {
localStorage.setItem('ROARR_LOG', 'true');
// Use a filter to show only logs at or above the given level
localStorage.setItem(
'ROARR_FILTER',
`context.logLevel:>=${LOG_LEVEL_MAP[consoleLogLevel]}`
);
localStorage.setItem('ROARR_FILTER', `context.logLevel:>=${LOG_LEVEL_MAP[consoleLogLevel]}`);
} else {
// Disable console log output
localStorage.setItem('ROARR_LOG', 'false');

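Roarr reads both keys from localStorage at runtime, so the net effect of this hook is just two writes. A sketch of the resulting state, assuming LOG_LEVEL_MAP assigns info the conventional Roarr value of 30 (the map's values are not shown in this diff):

// What useLogger leaves behind for consoleLogLevel === 'info':
localStorage.setItem('ROARR_LOG', 'true');
// 30 is assumed here; the real number comes from LOG_LEVEL_MAP.
localStorage.setItem('ROARR_FILTER', 'context.logLevel:>=30');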
View File

@ -1,8 +1,4 @@
import {
createDraftSafeSelectorCreator,
createSelectorCreator,
lruMemoize,
} from '@reduxjs/toolkit';
import { createDraftSafeSelectorCreator, createSelectorCreator, lruMemoize } from '@reduxjs/toolkit';
import type { GetSelectorsOptions } from '@reduxjs/toolkit/dist/entities/state_selectors';
import { isEqual } from 'lodash-es';

View File

@ -1,19 +1,12 @@
import { StorageError } from 'app/store/enhancers/reduxRemember/errors';
import { $projectId } from 'app/store/nanostores/projectId';
import type { UseStore } from 'idb-keyval';
import {
clear,
createStore as createIDBKeyValStore,
get,
set,
} from 'idb-keyval';
import { clear, createStore as createIDBKeyValStore, get, set } from 'idb-keyval';
import { action, atom } from 'nanostores';
import type { Driver } from 'redux-remember';
// Create a custom idb-keyval store (just needed to customize the name)
export const $idbKeyValStore = atom<UseStore>(
createIDBKeyValStore('invoke', 'invoke-store')
);
export const $idbKeyValStore = atom<UseStore>(createIDBKeyValStore('invoke', 'invoke-store'));
export const clearIdbKeyValStore = action($idbKeyValStore, 'clear', (store) => {
clear(store.get());

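Reads and writes then go through the customized store, so persisted state lands in the 'invoke' database rather than idb-keyval's default. A minimal sketch of round-tripping a value (the key and payload are illustrative):

import { createStore as createIDBKeyValStore, get, set } from 'idb-keyval';

// Same database/store names as the atom above.
const store = createIDBKeyValStore('invoke', 'invoke-store');

const roundTrip = async () => {
  // Persist a value, then read it back through the same custom store.
  await set('gallery', JSON.stringify({ selectedBoardId: 'none' }), store);
  return await get<string>('gallery', store);
};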
View File

@ -4,13 +4,12 @@ import { diff } from 'jsondiffpatch';
/**
* Super simple logger middleware. Useful for debugging when the redux devtools are awkward.
*/
export const debugLoggerMiddleware: Middleware =
(api: MiddlewareAPI) => (next) => (action) => {
const originalState = api.getState();
console.log('REDUX: dispatching', action);
const result = next(action);
const nextState = api.getState();
console.log('REDUX: next state', nextState);
console.log('REDUX: diff', diff(originalState, nextState));
return result;
};
export const debugLoggerMiddleware: Middleware = (api: MiddlewareAPI) => (next) => (action) => {
const originalState = api.getState();
console.log('REDUX: dispatching', action);
const result = next(action);
const nextState = api.getState();
console.log('REDUX: next state', nextState);
console.log('REDUX: diff', diff(originalState, nextState));
return result;
};

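To actually see its output, the middleware is appended to the store's chain after the defaults. A minimal self-contained sketch with Redux Toolkit (the counter slice is a placeholder, not from this codebase):

import { configureStore, createSlice, type Middleware, type MiddlewareAPI } from '@reduxjs/toolkit';

const debugLoggerMiddleware: Middleware = (api: MiddlewareAPI) => (next) => (action) => {
  console.log('REDUX: dispatching', action);
  const result = next(action);
  console.log('REDUX: next state', api.getState());
  return result;
};

const counter = createSlice({
  name: 'counter',
  initialState: { value: 0 },
  reducers: { incremented: (s) => void s.value++ },
});

// Appended last so it observes fully-processed actions.
const store = configureStore({
  reducer: { counter: counter.reducer },
  middleware: (getDefault) => getDefault().concat(debugLoggerMiddleware),
});

store.dispatch(counter.actions.incremented());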
View File

@ -35,8 +35,7 @@ export const actionSanitizer = <A extends UnknownAction>(action: A): A => {
if (socketGeneratorProgress.match(action)) {
const sanitized = cloneDeep(action);
if (sanitized.payload.data.progress_image) {
sanitized.payload.data.progress_image.dataURL =
'<Progress image omitted>';
sanitized.payload.data.progress_image.dataURL = '<Progress image omitted>';
}
return sanitized;
}

View File

@ -1,9 +1,4 @@
import type {
ListenerEffect,
TypedAddListener,
TypedStartListening,
UnknownAction,
} from '@reduxjs/toolkit';
import type { ListenerEffect, TypedAddListener, TypedStartListening, UnknownAction } from '@reduxjs/toolkit';
import { addListener, createListenerMiddleware } from '@reduxjs/toolkit';
import { addGalleryImageClickedListener } from 'app/store/middleware/listenerMiddleware/listeners/galleryImageClicked';
import type { AppDispatch, RootState } from 'app/store/store';
@ -47,10 +42,7 @@ import {
import { addImagesStarredListener } from './listeners/imagesStarred';
import { addImagesUnstarredListener } from './listeners/imagesUnstarred';
import { addImageToDeleteSelectedListener } from './listeners/imageToDeleteSelected';
import {
addImageUploadedFulfilledListener,
addImageUploadedRejectedListener,
} from './listeners/imageUploaded';
import { addImageUploadedFulfilledListener, addImageUploadedRejectedListener } from './listeners/imageUploaded';
import { addInitialImageSelectedListener } from './listeners/initialImageSelected';
import { addModelSelectedListener } from './listeners/modelSelected';
import { addModelsLoadedListener } from './listeners/modelsLoaded';
@ -78,19 +70,11 @@ export const listenerMiddleware = createListenerMiddleware();
export type AppStartListening = TypedStartListening<RootState, AppDispatch>;
export const startAppListening =
listenerMiddleware.startListening as AppStartListening;
export const startAppListening = listenerMiddleware.startListening as AppStartListening;
export const addAppListener = addListener as TypedAddListener<
RootState,
AppDispatch
>;
export const addAppListener = addListener as TypedAddListener<RootState, AppDispatch>;
export type AppListenerEffect = ListenerEffect<
UnknownAction,
RootState,
AppDispatch
>;
export type AppListenerEffect = ListenerEffect<UnknownAction, RootState, AppDispatch>;
/**
* The RTK listener middleware is a lightweight alternative to sagas/observables.

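For reference, a listener registered through startAppListening pairs a matcher or action creator with an async effect that runs after the reducers. A minimal self-contained sketch of the pattern (the slice and the logged message are illustrative):

import { createListenerMiddleware, createSlice, type TypedStartListening } from '@reduxjs/toolkit';

const demo = createSlice({
  name: 'demo',
  initialState: { count: 0 },
  reducers: { bumped: (s) => void s.count++ },
});

type RootState = { demo: { count: number } };

const listenerMiddleware = createListenerMiddleware();
const startAppListening = listenerMiddleware.startListening as TypedStartListening<RootState>;

// Runs after the reducer has handled the matched action.
startAppListening({
  actionCreator: demo.actions.bumped,
  effect: async (_action, { getState }) => {
    console.log('count is now', getState().demo.count);
  },
});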
View File

@ -1,10 +1,6 @@
import { isAnyOf } from '@reduxjs/toolkit';
import { logger } from 'app/logging/logger';
import {
canvasBatchIdsReset,
commitStagingAreaImage,
discardStagedImages,
} from 'features/canvas/store/canvasSlice';
import { canvasBatchIdsReset, commitStagingAreaImage, discardStagedImages } from 'features/canvas/store/canvasSlice';
import { addToast } from 'features/system/store/systemSlice';
import { t } from 'i18next';
import { queueApi } from 'services/api/endpoints/queue';
@ -23,10 +19,7 @@ export const addCommitStagingAreaImageListener = () => {
try {
const req = dispatch(
queueApi.endpoints.cancelByBatchIds.initiate(
{ batch_ids: batchIds },
{ fixedCacheKey: 'cancelByBatchIds' }
)
queueApi.endpoints.cancelByBatchIds.initiate({ batch_ids: batchIds }, { fixedCacheKey: 'cancelByBatchIds' })
);
const { canceled } = await req.unwrap();
req.reset();

View File

@ -12,15 +12,9 @@ export const appStarted = createAction('app/appStarted');
export const addFirstListImagesListener = () => {
startAppListening({
matcher: imagesApi.endpoints.listImages.matchFulfilled,
effect: async (
action,
{ dispatch, unsubscribe, cancelActiveListeners }
) => {
effect: async (action, { dispatch, unsubscribe, cancelActiveListeners }) => {
// Only run this listener on the first listImages request for no-board images
if (
action.meta.arg.queryCacheKey !==
getListImagesUrl({ board_id: 'none', categories: IMAGE_CATEGORIES })
) {
if (action.meta.arg.queryCacheKey !== getListImagesUrl({ board_id: 'none', categories: IMAGE_CATEGORIES })) {
return;
}

View File

@ -1,8 +1,5 @@
import { setInfillMethod } from 'features/parameters/store/generationSlice';
import {
shouldUseNSFWCheckerChanged,
shouldUseWatermarkerChanged,
} from 'features/system/store/systemSlice';
import { shouldUseNSFWCheckerChanged, shouldUseWatermarkerChanged } from 'features/system/store/systemSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import { startAppListening } from '..';
@ -11,11 +8,7 @@ export const addAppConfigReceivedListener = () => {
startAppListening({
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
const {
infill_methods = [],
nsfw_methods = [],
watermarking_methods = [],
} = action.payload;
const { infill_methods = [], nsfw_methods = [], watermarking_methods = [] } = action.payload;
const infillMethod = getState().generation.infillMethod;
if (!infill_methods.includes(infillMethod)) {

View File

@ -1,4 +1,4 @@
import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui';
import { createStandaloneToast, theme, TOAST_OPTIONS } from '@invoke-ai/ui-library';
import { logger } from 'app/logging/logger';
import { parseify } from 'common/util/serialize';
import { zPydanticValidationError } from 'features/system/store/zodSchemas';
@ -20,10 +20,7 @@ export const addBatchEnqueuedListener = () => {
effect: async (action) => {
const response = action.payload;
const arg = action.meta.arg.originalArgs;
logger('queue').debug(
{ enqueueResult: parseify(response) },
'Batch enqueued'
);
logger('queue').debug({ enqueueResult: parseify(response) }, 'Batch enqueued');
if (!toast.isActive('batch-queued')) {
toast({
@ -53,10 +50,7 @@ export const addBatchEnqueuedListener = () => {
status: 'error',
description: 'Unknown Error',
});
logger('queue').error(
{ batchConfig: parseify(arg), error: parseify(response) },
t('queue.batchFailedToQueue')
);
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
return;
}
@ -81,10 +75,7 @@ export const addBatchEnqueuedListener = () => {
status: 'error',
});
}
logger('queue').error(
{ batchConfig: parseify(arg), error: parseify(response) },
t('queue.batchFailedToQueue')
);
logger('queue').error({ batchConfig: parseify(arg), error: parseify(response) }, t('queue.batchFailedToQueue'));
},
});
};

View File

@ -22,13 +22,7 @@ export const addDeleteBoardAndImagesFulfilledListener = () => {
const { generation, canvas, nodes, controlAdapters } = getState();
deleted_images.forEach((image_name) => {
const imageUsage = getImageUsage(
generation,
canvas,
nodes,
controlAdapters,
image_name
);
const imageUsage = getImageUsage(generation, canvas, nodes, controlAdapters, image_name);
if (imageUsage.isInitialImage && !wasInitialImageReset) {
dispatch(clearInitialImage());

View File

@ -1,13 +1,6 @@
import { isAnyOf } from '@reduxjs/toolkit';
import {
boardIdSelected,
galleryViewChanged,
imageSelected,
} from 'features/gallery/store/gallerySlice';
import {
ASSETS_CATEGORIES,
IMAGE_CATEGORIES,
} from 'features/gallery/store/types';
import { boardIdSelected, galleryViewChanged, imageSelected } from 'features/gallery/store/gallerySlice';
import { ASSETS_CATEGORIES, IMAGE_CATEGORIES } from 'features/gallery/store/types';
import { imagesApi } from 'services/api/endpoints/images';
import { imagesSelectors } from 'services/api/util';
@ -16,55 +9,38 @@ import { startAppListening } from '..';
export const addBoardIdSelectedListener = () => {
startAppListening({
matcher: isAnyOf(boardIdSelected, galleryViewChanged),
effect: async (
action,
{ getState, dispatch, condition, cancelActiveListeners }
) => {
effect: async (action, { getState, dispatch, condition, cancelActiveListeners }) => {
// Cancel any in-progress instances of this listener; we don't want to select an image from a previous board
cancelActiveListeners();
const state = getState();
const board_id = boardIdSelected.match(action)
? action.payload.boardId
: state.gallery.selectedBoardId;
const board_id = boardIdSelected.match(action) ? action.payload.boardId : state.gallery.selectedBoardId;
const galleryView = galleryViewChanged.match(action)
? action.payload
: state.gallery.galleryView;
const galleryView = galleryViewChanged.match(action) ? action.payload : state.gallery.galleryView;
// when a board is selected, we need to wait until the board has loaded *some* images, then select the first one
const categories =
galleryView === 'images' ? IMAGE_CATEGORIES : ASSETS_CATEGORIES;
const categories = galleryView === 'images' ? IMAGE_CATEGORIES : ASSETS_CATEGORIES;
const queryArgs = { board_id: board_id ?? 'none', categories };
// wait until the board has some images - maybe it already has some from a previous fetch
// must use getState() to ensure we do not have stale state
const isSuccess = await condition(
() =>
imagesApi.endpoints.listImages.select(queryArgs)(getState())
.isSuccess,
() => imagesApi.endpoints.listImages.select(queryArgs)(getState()).isSuccess,
5000
);
if (isSuccess) {
// the board was just changed - we can select the first image
const { data: boardImagesData } =
imagesApi.endpoints.listImages.select(queryArgs)(getState());
const { data: boardImagesData } = imagesApi.endpoints.listImages.select(queryArgs)(getState());
if (
boardImagesData &&
boardIdSelected.match(action) &&
action.payload.selectedImageName
) {
if (boardImagesData && boardIdSelected.match(action) && action.payload.selectedImageName) {
const selectedImage = imagesSelectors.selectById(boardImagesData, action.payload.selectedImageName);
dispatch(imageSelected(selectedImage || null));
} else if (boardImagesData) {
const firstImage = imagesSelectors.selectAll(boardImagesData)[0];
const selectedImage = imagesSelectors.selectById(
boardImagesData,
action.payload.selectedImageName
);
dispatch(imageSelected(selectedImage || firstImage || null));
dispatch(imageSelected(firstImage || null));
} else {
// board has no images - deselect
dispatch(imageSelected(null));

View File

@ -11,9 +11,7 @@ export const addCanvasCopiedToClipboardListener = () => {
startAppListening({
actionCreator: canvasCopiedToClipboard,
effect: async (action, { dispatch, getState }) => {
const moduleLog = $logger
.get()
.child({ namespace: 'canvasCopiedToClipboardListener' });
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
const state = getState();
try {

View File

@ -11,9 +11,7 @@ export const addCanvasDownloadedAsImageListener = () => {
startAppListening({
actionCreator: canvasDownloadedAsImage,
effect: async (action, { dispatch, getState }) => {
const moduleLog = $logger
.get()
.child({ namespace: 'canvasSavedToGalleryListener' });
const moduleLog = $logger.get().child({ namespace: 'canvasSavedToGalleryListener' });
const state = getState();
let blob;
@ -32,9 +30,7 @@ export const addCanvasDownloadedAsImageListener = () => {
}
downloadBlob(blob, 'canvas.png');
dispatch(
addToast({ title: t('toast.canvasDownloaded'), status: 'success' })
);
dispatch(addToast({ title: t('toast.canvasDownloaded'), status: 'success' }));
},
});
};

View File

@ -13,9 +13,7 @@ export const addCanvasMergedListener = () => {
startAppListening({
actionCreator: canvasMerged,
effect: async (action, { dispatch }) => {
const moduleLog = $logger
.get()
.child({ namespace: 'canvasCopiedToClipboardListener' });
const moduleLog = $logger.get().child({ namespace: 'canvasCopiedToClipboardListener' });
const blob = await getFullBaseLayerBlob();
if (!blob) {

View File

@ -21,11 +21,7 @@ type AnyControlAdapterParamChangeAction =
| ReturnType<typeof controlAdapterProcessortTypeChanged>
| ReturnType<typeof controlAdapterAutoConfigToggled>;
const predicate: AnyListenerPredicate<RootState> = (
action,
state,
prevState
) => {
const predicate: AnyListenerPredicate<RootState> = (action, state, prevState) => {
const isActionMatched =
controlAdapterProcessorParamsChanged.match(action) ||
controlAdapterModelChanged.match(action) ||
@ -40,12 +36,7 @@ const predicate: AnyListenerPredicate<RootState> = (
const { id } = action.payload;
const prevCA = selectControlAdapterById(prevState.controlAdapters, id);
const ca = selectControlAdapterById(state.controlAdapters, id);
if (
!prevCA ||
!isControlNetOrT2IAdapter(prevCA) ||
!ca ||
!isControlNetOrT2IAdapter(ca)
) {
if (!prevCA || !isControlNetOrT2IAdapter(prevCA) || !ca || !isControlNetOrT2IAdapter(ca)) {
return false;
}

View File

@ -64,37 +64,28 @@ export const addControlNetImageProcessedListener = () => {
);
const enqueueResult = await req.unwrap();
req.reset();
log.debug(
{ enqueueResult: parseify(enqueueResult) },
t('queue.graphQueued')
);
log.debug({ enqueueResult: parseify(enqueueResult) }, t('queue.graphQueued'));
const [invocationCompleteAction] = await take(
(action): action is ReturnType<typeof socketInvocationComplete> =>
socketInvocationComplete.match(action) &&
action.payload.data.queue_batch_id ===
enqueueResult.batch.batch_id &&
action.payload.data.queue_batch_id === enqueueResult.batch.batch_id &&
action.payload.data.source_node_id === nodeId
);
// We still have to check the output type
if (isImageOutput(invocationCompleteAction.payload.data.result)) {
const { image_name } =
invocationCompleteAction.payload.data.result.image;
const { image_name } = invocationCompleteAction.payload.data.result.image;
// Wait for the ImageDTO to be received
const [{ payload }] = await take(
(action) =>
imagesApi.endpoints.getImageDTO.matchFulfilled(action) &&
action.payload.image_name === image_name
imagesApi.endpoints.getImageDTO.matchFulfilled(action) && action.payload.image_name === image_name
);
const processedControlImage = payload as ImageDTO;
log.debug(
{ controlNetId: action.payload, processedControlImage },
'ControlNet image processed'
);
log.debug({ controlNetId: action.payload, processedControlImage }, 'ControlNet image processed');
// Update the processed image in the store
dispatch(
@ -105,10 +96,7 @@ export const addControlNetImageProcessedListener = () => {
);
}
} catch (error) {
log.error(
{ enqueueBatchArg: parseify(enqueueBatchArg) },
t('queue.graphFailedToQueue')
);
log.error({ enqueueBatchArg: parseify(enqueueBatchArg) }, t('queue.graphFailedToQueue'));
if (error instanceof Object) {
if ('data' in error && 'status' in error) {

View File

@ -2,10 +2,7 @@ import { logger } from 'app/logging/logger';
import { enqueueRequested } from 'app/store/actions';
import openBase64ImageInTab from 'common/util/openBase64ImageInTab';
import { parseify } from 'common/util/serialize';
import {
canvasBatchIdAdded,
stagingAreaInitialized,
} from 'features/canvas/store/canvasSlice';
import { canvasBatchIdAdded, stagingAreaInitialized } from 'features/canvas/store/canvasSlice';
import { blobToDataURL } from 'features/canvas/util/blobToDataURL';
import { getCanvasData } from 'features/canvas/util/getCanvasData';
import { getCanvasGenerationMode } from 'features/canvas/util/getCanvasGenerationMode';
@ -34,20 +31,14 @@ import { startAppListening } from '..';
export const addEnqueueRequestedCanvasListener = () => {
startAppListening({
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
enqueueRequested.match(action) &&
action.payload.tabName === 'unifiedCanvas',
enqueueRequested.match(action) && action.payload.tabName === 'unifiedCanvas',
effect: async (action, { getState, dispatch }) => {
const log = logger('queue');
const { prepend } = action.payload;
const state = getState();
const {
layerState,
boundingBoxCoordinates,
boundingBoxDimensions,
isMaskEnabled,
shouldPreserveMaskedArea,
} = state.canvas;
const { layerState, boundingBoxCoordinates, boundingBoxDimensions, isMaskEnabled, shouldPreserveMaskedArea } =
state.canvas;
// Build canvas blobs
const canvasBlobsAndImageData = await getCanvasData(
@ -63,14 +54,10 @@ export const addEnqueueRequestedCanvasListener = () => {
return;
}
const { baseBlob, baseImageData, maskBlob, maskImageData } =
canvasBlobsAndImageData;
const { baseBlob, baseImageData, maskBlob, maskImageData } = canvasBlobsAndImageData;
// Determine the generation mode
const generationMode = getCanvasGenerationMode(
baseImageData,
maskImageData
);
const generationMode = getCanvasGenerationMode(baseImageData, maskImageData);
if (state.system.enableImageDebugging) {
const baseDataURL = await blobToDataURL(baseBlob);
@ -115,12 +102,7 @@ export const addEnqueueRequestedCanvasListener = () => {
).unwrap();
}
const graph = buildCanvasGraph(
state,
generationMode,
canvasInitImage,
canvasMaskImage
);
const graph = buildCanvasGraph(state, generationMode, canvasInitImage, canvasMaskImage);
log.debug({ graph: parseify(graph) }, `Canvas graph built`);

View File

@ -11,9 +11,7 @@ import { startAppListening } from '..';
export const addEnqueueRequestedLinear = () => {
startAppListening({
predicate: (action): action is ReturnType<typeof enqueueRequested> =>
enqueueRequested.match(action) &&
(action.payload.tabName === 'txt2img' ||
action.payload.tabName === 'img2img'),
enqueueRequested.match(action) && (action.payload.tabName === 'txt2img' || action.payload.tabName === 'img2img'),
effect: async (action, { getState, dispatch }) => {
const state = getState();
const model = state.generation.model;

View File

@ -32,8 +32,7 @@ export const addGalleryImageClickedListener = () => {
const { imageDTO, shiftKey, ctrlKey, metaKey } = action.payload;
const state = getState();
const queryArgs = selectListImagesQueryArgs(state);
const { data: listImagesData } =
imagesApi.endpoints.listImages.select(queryArgs)(state);
const { data: listImagesData } = imagesApi.endpoints.listImages.select(queryArgs)(state);
if (!listImagesData) {
// Should never happen if we have clicked a gallery image
@ -46,12 +45,8 @@ export const addGalleryImageClickedListener = () => {
if (shiftKey) {
const rangeEndImageName = imageDTO.image_name;
const lastSelectedImage = selection[selection.length - 1]?.image_name;
const lastClickedIndex = imageDTOs.findIndex(
(n) => n.image_name === lastSelectedImage
);
const currentClickedIndex = imageDTOs.findIndex(
(n) => n.image_name === rangeEndImageName
);
const lastClickedIndex = imageDTOs.findIndex((n) => n.image_name === lastSelectedImage);
const currentClickedIndex = imageDTOs.findIndex((n) => n.image_name === rangeEndImageName);
if (lastClickedIndex > -1 && currentClickedIndex > -1) {
// We have a valid range!
const start = Math.min(lastClickedIndex, currentClickedIndex);
@ -60,15 +55,8 @@ export const addGalleryImageClickedListener = () => {
dispatch(selectionChanged(selection.concat(imagesToSelect)));
}
} else if (ctrlKey || metaKey) {
if (
selection.some((i) => i.image_name === imageDTO.image_name) &&
selection.length > 1
) {
dispatch(
selectionChanged(
selection.filter((n) => n.image_name !== imageDTO.image_name)
)
);
if (selection.some((i) => i.image_name === imageDTO.image_name) && selection.length > 1) {
dispatch(selectionChanged(selection.filter((n) => n.image_name !== imageDTO.image_name)));
} else {
dispatch(selectionChanged(selection.concat(imageDTO)));
}

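The shift-click branch above reduces to index arithmetic over the currently listed images. A standalone sketch of that range computation, with image DTOs simplified to plain name strings:

// Inclusive slice between the last-selected and the just-clicked image.
const getRangeSelection = (imageNames: string[], lastSelected: string, clicked: string): string[] => {
  const lastIdx = imageNames.indexOf(lastSelected);
  const clickedIdx = imageNames.indexOf(clicked);
  if (lastIdx === -1 || clickedIdx === -1) {
    return []; // no valid range; the caller falls back to single selection
  }
  const start = Math.min(lastIdx, clickedIdx);
  const end = Math.max(lastIdx, clickedIdx);
  return imageNames.slice(start, end + 1);
};

// getRangeSelection(['a', 'b', 'c', 'd'], 'b', 'd') -> ['b', 'c', 'd']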
Some files were not shown because too many files have changed in this diff.