Merge branch 'v2.3' into dev/fix-codeowners

This commit is contained in:
Lincoln Stein 2023-03-06 18:11:19 -05:00 committed by GitHub
commit 21433a948c
56 changed files with 1823 additions and 488 deletions

View File

@@ -1,5 +1,8 @@
root = true

# All files
[*]
max_line_length = 80
charset = utf-8
end_of_line = lf
indent_size = 2
@@ -10,3 +13,18 @@ trim_trailing_whitespace = true

# Python
[*.py]
indent_size = 4
max_line_length = 120

# css
[*.css]
indent_size = 4

# flake8
[.flake8]
indent_size = 4

# Markdown MkDocs
[docs/**/*.md]
max_line_length = 80
indent_size = 4
indent_style = unset

.flake8 Normal file

@@ -0,0 +1,37 @@
[flake8]
max-line-length = 120
extend-ignore =
    # See https://github.com/PyCQA/pycodestyle/issues/373
    E203,
    # use Bugbear's B950 instead
    E501,
    # from black repo https://github.com/psf/black/blob/main/.flake8
    E266, W503, B907
extend-select =
    # Bugbear line length
    B950
extend-exclude =
    scripts/orig_scripts/*
    ldm/models/*
    ldm/modules/*
    ldm/data/*
    ldm/generate.py
    ldm/util.py
    ldm/simplet2i.py
per-file-ignores =
    # B950 line too long
    # W605 invalid escape sequence
    # F841 assigned to but never used
    # F401 imported but unused
    tests/test_prompt_parser.py: B950, W605, F401
    tests/test_textual_inversion.py: F841, B950
    # B023 Function definition does not bind loop variable
    scripts/legacy_api.py: F401, B950, B023, F841
    ldm/invoke/__init__.py: F401
    # B010 Do not call setattr with a constant attribute value
    ldm/invoke/server_legacy.py: B010
# =====================
# flake-quote settings:
# =====================
# Set this to match black style:
inline-quotes = double

View File

@@ -9,6 +9,10 @@ jobs:
  mkdocs-material:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    env:
      REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
      REPO_NAME: '${{ github.repository }}'
      SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
    steps:
      - name: checkout sources
        uses: actions/checkout@v3
@@ -19,11 +23,15 @@ jobs:
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
          cache: pip
          cache-dependency-path: pyproject.toml

      - name: install requirements
        env:
          PIP_USE_PEP517: 1
        run: |
          python -m \
            pip install ".[docs]"

      - name: confirm buildability
        run: |

.pre-commit-config.yaml Normal file

@@ -0,0 +1,41 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/psf/black
    rev: 23.1.0
    hooks:
      - id: black

  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort

  - repo: https://github.com/PyCQA/flake8
    rev: 6.0.0
    hooks:
      - id: flake8
        additional_dependencies:
          - flake8-black
          - flake8-bugbear
          - flake8-comprehensions
          - flake8-simplify

  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: 'v3.0.0-alpha.4'
    hooks:
      - id: prettier

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-merge-conflict
      - id: check-symlinks
      - id: check-toml
      - id: end-of-file-fixer
      - id: no-commit-to-branch
        args: ['--branch', 'main']
      - id: trailing-whitespace
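With this config in place, the hooks are activated per clone; a minimal sketch of the usual workflow with the standard pre-commit CLI:

```sh
# install the git hook into your local clone (runs the checks on every commit)
pre-commit install

# optionally run every hook against the entire tree once
pre-commit run --all-files
```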

.prettierignore Normal file

@@ -0,0 +1,14 @@
invokeai/frontend/.husky
invokeai/frontend/patches

# Ignore artifacts:
build
coverage
static
invokeai/frontend/dist

# Ignore all HTML files:
*.html

# Ignore deprecated docs
docs/installation/deprecated_documentation

View File

@@ -1,9 +1,9 @@
embeddedLanguageFormatting: auto
endOfLine: lf
singleQuote: true
semi: true
trailingComma: es5
useTabs: false
overrides:
  - files: '*.md'
    options:
@@ -11,3 +11,9 @@ overrides:
      printWidth: 80
      parser: markdown
      cursorOffset: -1
  - files: docs/**/*.md
    options:
      tabWidth: 4
  - files: 'invokeai/frontend/public/locales/*.json'
    options:
      tabWidth: 4

docs/.markdownlint.jsonc Normal file

@@ -0,0 +1,5 @@
{
"MD046": false,
"MD007": false,
"MD030": false
}

View File

@@ -2,62 +2,82 @@
title: Overview
---

- The Basics

  - The [Web User Interface](WEB.md)

    Guide to the Web interface. Also see the
    [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)

  - The [Unified Canvas](UNIFIED_CANVAS.md)

    Build complex scenes by combining and modifying multiple images in a
    stepwise fashion. This feature combines img2img, inpainting and
    outpainting in a single convenient digital artist-optimized user
    interface.

  - The [Command Line Interface (CLI)](CLI.md)

    Scriptable access to InvokeAI's features.

- Image Generation

  - [Prompt Engineering](PROMPTS.md)

    Get the images you want with the InvokeAI prompt engineering language.

  - [Post-Processing](POSTPROCESS.md)

    Restore mangled faces and make images larger with upscaling. Also see
    the [Embiggen Upscaling Guide](EMBIGGEN.md).

  - The [Concepts Library](CONCEPTS.md)

    Add custom subjects and styles using HuggingFace's repository of
    embeddings.

  - [Image-to-Image Guide for the CLI](IMG2IMG.md)

    Use a seed image to build new creations in the CLI.

  - [Inpainting Guide for the CLI](INPAINTING.md)

    Selectively erase and replace portions of an existing image in the CLI.

  - [Outpainting Guide for the CLI](OUTPAINTING.md)

    Extend the borders of the image with an "outcrop" function within the
    CLI.

  - [Generating Variations](VARIATIONS.md)

    Have an image you like and want to generate many more like it?
    Variations are the ticket.

- Model Management

  - [Model Installation](../installation/050_INSTALLING_MODELS.md)

    Learn how to import third-party models and switch among them. This guide
    also covers optimizing models to load quickly.

  - [Merging Models](MODEL_MERGING.md)

    Teach an old model new tricks. Merge 2-3 models together to create a new
    model that combines characteristics of the originals.

  - [Textual Inversion](TEXTUAL_INVERSION.md)

    Personalize models by adding your own style or subjects.

- Other Features

  - [The NSFW Checker](NSFW.md)

    Prevent InvokeAI from displaying unwanted racy images.

  - [Miscellaneous](OTHER.md)

    Run InvokeAI on Google Colab, generate images with repeating patterns,
    batch process a file of prompts, increase the "creativity" of image
    generation by adding initial noise, and more!

View File

@@ -0,0 +1,4 @@
# :octicons-file-code-16: IDE-Settings

Here we will share settings for IDEs used by our developers; maybe you'll find
something interesting that helps boost your development efficiency 🔥

View File

@@ -0,0 +1,250 @@
---
title: Visual Studio Code
---
# :material-microsoft-visual-studio-code:Visual Studio Code
The Workspace Settings are stored in the project (repository) root and take
higher priority than your user settings.

This helps to have different settings for different projects, while the user
settings are used as a default if no workspace settings are provided.

## tasks.json

First we will create a task configuration which creates a virtual environment
and upgrades the deps (pip, setuptools and wheel).

Into this venv we will then install the project described by pyproject.toml in
editable mode, with the dev, docs and test dependencies.
```json title=".vscode/tasks.json"
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "Create virtual environment",
"detail": "Create .venv and upgrade pip, setuptools and wheel",
"command": "python3",
"args": [
"-m",
"venv",
".venv",
"--prompt",
"InvokeAI",
"--upgrade-deps"
],
"runOptions": {
"instanceLimit": 1,
"reevaluateOnRerun": true
},
"group": {
"kind": "build"
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
},
{
"label": "build InvokeAI",
"detail": "Build pyproject.toml with extras dev, docs and test",
"command": "${workspaceFolder}/.venv/bin/python3",
"args": [
"-m",
"pip",
"install",
"--use-pep517",
"--editable",
".[dev,docs,test]"
],
"dependsOn": "Create virtual environment",
"dependsOrder": "sequence",
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
}
]
}
```
The fastest way to build InvokeAI now is ++cmd+shift+b++
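For reference, a sketch of the equivalent terminal commands on Linux/macOS, mirroring what the two tasks above do (run from the repository root):

```sh
# "Create virtual environment" task: create .venv and upgrade pip, setuptools and wheel
python3 -m venv .venv --prompt InvokeAI --upgrade-deps

# "build InvokeAI" task: editable install with the dev, docs and test extras
.venv/bin/python3 -m pip install --use-pep517 --editable ".[dev,docs,test]"
```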
## launch.json

This file is used to define debugger configurations, so that you can one-click
launch and monitor the application, set breakpoints to inspect specific states,
and so on.
```json title=".vscode/launch.json"
{
"version": "0.2.0",
"configurations": [
{
"name": "invokeai web",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "invokeai cli",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "mkdocs serve",
"type": "python",
"request": "launch",
"program": ".venv/bin/mkdocs",
"args": ["serve"],
"justMyCode": true
}
]
}
```
Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
you have created a virtual environment via the [tasks](#tasksjson) from the
previous step.)
## extensions.json
A list of recommended vscode-extensions to make your life easier:
```json title=".vscode/extensions.json"
{
"recommendations": [
"editorconfig.editorconfig",
"github.vscode-pull-request-github",
"ms-python.black-formatter",
"ms-python.flake8",
"ms-python.isort",
"ms-python.python",
"ms-python.vscode-pylance",
"redhat.vscode-yaml",
"tamasfe.even-better-toml",
"eamodio.gitlens",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"esbenp.prettier-vscode",
"davidanson.vscode-markdownlint",
"yzhang.markdown-all-in-one",
"bierner.github-markdown-preview",
"ms-azuretools.vscode-docker",
"mads-hartmann.bash-ide-vscode"
]
}
```
## settings.json
With the settings below, your files get formatted automatically when you save
them (only your modifications, where supported), which helps you avoid trouble
with the pre-commit hooks. If the hooks fail, they will prevent you from
committing, but most hooks directly add a fixed version, so that you just need
to stage and commit the files again:
```json title=".vscode/settings.json"
{
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.quickSuggestions": {
"comments": false,
"strings": true,
"other": true
},
"editor.suggest.insertMode": "replace",
"gitlens.codeLens.scopes": ["document"]
},
"[jsonc]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "file"
},
"[toml]": {
"editor.defaultFormatter": "tamasfe.even-better-toml",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[yaml]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.rulers": [80],
"editor.unicodeHighlight.ambiguousCharacters": false,
"editor.unicodeHighlight.invisibleCharacters": false,
"diffEditor.ignoreTrimWhitespace": false,
"editor.wordWrap": "on",
"editor.quickSuggestions": {
"comments": "off",
"strings": "off",
"other": "off"
},
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[shellscript]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"[ignore]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"editor.rulers": [88],
"evenBetterToml.formatter.alignEntries": false,
"evenBetterToml.formatter.allowedBlankLines": 1,
"evenBetterToml.formatter.arrayAutoExpand": true,
"evenBetterToml.formatter.arrayTrailingComma": true,
"evenBetterToml.formatter.arrayAutoCollapse": true,
"evenBetterToml.formatter.columnWidth": 88,
"evenBetterToml.formatter.compactArrays": true,
"evenBetterToml.formatter.compactInlineTables": true,
"evenBetterToml.formatter.indentEntries": false,
"evenBetterToml.formatter.inlineTableExpand": true,
"evenBetterToml.formatter.reorderArrays": true,
"evenBetterToml.formatter.reorderKeys": true,
"evenBetterToml.formatter.compactEntries": false,
"evenBetterToml.schema.enabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"python.languageServer": "Pylance",
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"python.testing.pytestArgs": [
"tests",
"--cov=ldm",
"--cov-branch",
"--cov-report=term:skip-covered"
],
"yaml.schemas": {
"https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
}
}
```

View File

@@ -0,0 +1,135 @@
---
title: Pull-Request
---
# :octicons-git-pull-request-16: Pull-Request
## Prerequisites
To follow the steps in this tutorial you will need:
- [GitHub](https://github.com) account
- [git](https://git-scm.com/downloads) source control
- Text / code editor (personally I prefer
  [Visual Studio Code](https://code.visualstudio.com/Download))
- Terminal:
  - If you are on Linux/macOS you can use bash or zsh
  - For Windows users the commands are written for PowerShell
## Fork Repository
The first step in contributing to InvokeAI is to fork the repository.

Since you are already reading this doc, the easiest way to do so is by clicking
[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork" button
in the top right.
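If you already use the [GitHub CLI](https://cli.github.com) (it shows up again at the end of this guide), forking and cloning can optionally be collapsed into a single step:

```sh
# fork invoke-ai/InvokeAI under your account and clone the fork locally
gh repo fork invoke-ai/InvokeAI --clone
```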
## Clone your fork
After forking the repository, clone it to your dev machine:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git clone https://github.com/<github username>/InvokeAI \
&& cd InvokeAI
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git clone https://github.com/<github username>/InvokeAI `
&& cd InvokeAI
```
## Install in Editable Mode
To install InvokeAI in editable mode, we (as always) recommend creating and
activating a venv first. Afterwards you can install the InvokeAI package,
including the dev and docs extras, in editable mode, followed by the
installation of the pre-commit hooks:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
python -m venv .venv \
--prompt InvokeAI \
--upgrade-deps \
&& source .venv/bin/activate \
&& pip install \
--upgrade \
--use-pep517 \
--editable=".[dev,docs]" \
&& pre-commit install
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
python -m venv .venv `
--prompt InvokeAI `
--upgrade-deps `
&& .venv/scripts/activate.ps1 `
&& pip install `
--upgrade `
--use-pep517 `
--editable=".[dev,docs]" `
&& pre-commit install
```
## Create a branch
Make sure you are on the main branch; from there, create your feature branch:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git checkout main \
&& git pull \
&& git checkout -B <branch name>
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git checkout main `
&& git pull `
&& git checkout -B <branch name>
```
## Commit your changes
When you are done with adding / updating content, you need to commit those
changes to your repository before you can actually open a PR:
```{ .sh .annotate }
git add <files you have changed> # (1)!
git commit -m "A commit message which describes your change"
git push
```
1. Replace this with a space-separated list of the files you changed, like:
   `README.md foo.sh bar.json baz`
## Create a Pull Request
After pushing your changes, you are ready to create a Pull Request: just head
over to your fork on [GitHub](https://github.com), which should already show you
a message that there have been recent changes on your feature branch, along with
a green button to create the PR.

The default target for your PR is the main branch of
[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI).
Another way would be to create it in VS-Code or via the GitHub CLI (or even via
the GitHub CLI in a VS-Code Terminal Window 🤭):
```sh
gh pr create
```
The CLI will inform you if there are still unpushed commits on your branch. It
will also prompt you for things like the Title and the Body (Description) if
you did not already pass them as arguments.
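A non-interactive sketch with explicit flags (the title and body are placeholders):

```sh
gh pr create \
  --base main \
  --title "A short, descriptive title" \
  --body "A description of what the change does and why"
```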

View File

@@ -0,0 +1,26 @@
---
title: Issues
---
# :octicons-issue-opened-16: Issues
## :fontawesome-solid-bug: Report a bug
If you stumbled over a bug while using InvokeAI, we would appreciate it a lot if
you
[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
to inform us about the details so that our developers can look into it.
If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md) to
find out how to create a Pull Request.
## Request a feature
If you have an idea for a new feature on your mind which you would like to see
in InvokeAI, you can open a
[feature request](https://github.com/invoke-ai/InvokeAI/issues/new/choose)
in the issues section of the repository.

If you are just curious which features have already been requested, you can
find the overview of open requests
[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement).

View File

@@ -0,0 +1,32 @@
---
title: docs
---
# :simple-readthedocs: MkDocs-Material
If you want to contribute to the docs, there is an easy way to verify the
results of your changes before committing them.

Just follow the steps in the [Pull-Request](010_PULL_REQUEST.md) docs, where we
already
[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).

Once installed, it's as simple as:
```sh
mkdocs serve
```
This will build the docs locally and serve them on your localhost, with
auto-refresh included: you can update a doc, save it and switch to the browser
without needing to restart `mkdocs serve`.
More information about the "mkdocs flavored markdown syntax" can be found
[here](https://squidfunk.github.io/mkdocs-material/reference/).
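If you want a one-shot check instead of a live server, a strict build will also surface broken links and other warnings (assuming no extra flags are required by the project's mkdocs configuration):

```sh
mkdocs build --strict
```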
## :material-microsoft-visual-studio-code:VS-Code
We also provide a
[launch configuration for VS-Code](../IDE-Settings/vs-code.md#launchjson) which
includes a `mkdocs serve` entrypoint as well. You also don't have to worry about
the formatting since this is automated via prettier, but this is of course not
limited to VS-Code.

View File

@@ -0,0 +1,76 @@
# Transformation to nodes
## Current state
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| generate(generate);
web --> |txt2img| generate(generate);
cli --> |txt2img| generate(generate);
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
## Transitional Architecture
### First step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(Img2img node);
web --> |txt2img| generate(generate);
img2img_node --> model_manager;
img2img_node --> generators;
cli --> |txt2img| generate;
cli --> |img2img| generate;
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
### Second step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(img2img node);
img2img_node --> model_manager;
img2img_node --> generators;
web --> |txt2img| txt2img_node(txt2img node);
cli --> |txt2img| txt2img_node;
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
txt2img_node --> model_manager;
txt2img_node --> generators;
txt2img_node --> ti_manager[TI Manager];
```
## Final Architecture
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img|img2img_node(img2img node);
cli --> |img2img|img2img_node;
web --> |txt2img|txt2img_node(txt2img node);
cli --> |txt2img|txt2img_node;
img2img_node --> model_manager;
txt2img_node --> model_manager;
img2img_node --> generators;
txt2img_node --> generators;
img2img_node --> ti_manager[TI Manager];
txt2img_node --> ti_manager[TI Manager];
```

View File

@@ -0,0 +1,16 @@
---
title: Contributing
---
# :fontawesome-solid-code-commit: Contributing
There are many ways you can contribute to
[InvokeAI](https://github.com/invoke-ai/InvokeAI), such as translations, or
opening issues for bugs and ideas for improvement.

This section of the docs explains some of the ways you can contribute, to make
getting started easier for newcomers as well as advanced users :nerd:

If you want to contribute code but do not have an exact idea yet, take a look
at the currently open
[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)

docs/help/index.md Normal file

@@ -0,0 +1,12 @@
# :material-help:Help
If you are looking for help with the installation of InvokeAI, please take a
look at the [Installation](../installation/index.md) section of the docs.

Here you will find help with topics like

- how to contribute
- configuration recommendations for IDEs

If you have an idea about what's missing and aren't afraid of contributing,
just take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do so.

View File

@@ -2,6 +2,8 @@
title: Home
---

# :octicons-home-16: Home

<!--
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
@@ -29,36 +31,36 @@ title: Home
[![github open prs badge]][github open prs link]

[ci checks on dev badge]:
  https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[ci checks on dev link]:
  https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[ci checks on main badge]:
  https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[ci checks on main link]:
  https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]:
  https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
[github forks link]:
  https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
[github open issues badge]:
  https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
[github open issues link]:
  https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]:
  https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
[github open prs link]:
  https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]:
  https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]:
  https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]:
  https://github.com/invoke-ai/InvokeAI/commits/development
[latest release badge]:
  https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases

</div>
@@ -87,24 +89,24 @@ Q&A</a>]

You will need one of the following:

- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
  only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

We do **not recommend** the following video cards due to issues with their
running in half-precision mode and having insufficient VRAM to render 512x512
images in full-precision mode:

- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory and Disk

- At least 12 GB Main Memory RAM.
- At least 18 GB of free disk space for the machine learning model, Python,
  and all its dependencies.
## :octicons-package-dependencies-24: Installation

@@ -113,48 +115,65 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

### [Installation Getting Started Guide](installation)

#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)

This method is recommended for 1st time users

#### [Manual Installation](installation/020_INSTALL_MANUAL.md)

This method is recommended for experienced users and developers

#### [Docker Installation](installation/040_INSTALL_DOCKER.md)

This method is recommended for those familiar with running Docker containers

### Other Installation Guides

- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
- [XFormers](installation/070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
- [Installing New Models](installation/050_INSTALLING_MODELS.md)

## :octicons-gift-24: InvokeAI Features

### The InvokeAI Web Interface

- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)

<!-- separator -->

### The InvokeAI Command Line Interface

- [Command Line Interface Reference Guide](features/CLI.md)

<!-- separator -->

### Image Management

- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other Features](features/OTHER.md)

<!-- separator -->

### Model Management

- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)

<!-- separator -->

### Prompt Engineering

- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)
## :octicons-log-16: Latest Changes

@@ -162,84 +181,188 @@ This method is recommended for those familiar with running Docker containers

#### Migration to Stable Diffusion `diffusers` models

Previous versions of InvokeAI supported the original model file format
introduced with Stable Diffusion 1.4. In the original format, known variously as
"checkpoint", or "legacy" format, there is a single large weights file ending
with `.ckpt` or `.safetensors`. Though this format has served the community
well, it has a number of disadvantages, including file size, slow loading times,
and a variety of non-standard variants that require special-case code to handle.
In addition, because checkpoint files are actually a bundle of multiple machine
learning sub-models, it is hard to swap different sub-models in and out, or to
share common sub-models. A new format, introduced by the StabilityAI company in
collaboration with HuggingFace, is called `diffusers` and consists of a
directory of individual models. The most immediate benefit of `diffusers` is
that they load from disk very quickly. A longer term benefit is that in the near
future `diffusers` models will be able to share common sub-models, dramatically
reducing disk space when you have multiple fine-tune models derived from the
same base.

When you perform a new install of version 2.3.0, you will be offered the option
to install the `diffusers` versions of a number of popular SD models, including
Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of
2.1). These will act and work just like the checkpoint versions. Do not be
concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk!
InvokeAI 2.3.0 can still load these and generate images from them without any
extra intervention on your part.

To take advantage of the optimized loading times of `diffusers` models, InvokeAI
offers options to convert legacy checkpoint models into optimized `diffusers`
models. If you use the `invokeai` command line interface, the relevant commands
are:

- `!convert_model` -- Take the path to a local checkpoint file or a URL that
  is pointing to one, convert it into a `diffusers` model, and import it into
  InvokeAI's models registry file.
- `!optimize_model` -- If you already have a checkpoint model in your InvokeAI
  models file, this command will accept its short name and convert it into a
  like-named `diffusers` model, optionally deleting the original checkpoint
  file.
- `!import_model` -- Take the local path of either a checkpoint file or a
  `diffusers` model directory and import it into InvokeAI's registry file. You
  may also provide the ID of any diffusers model that has been published on
  the
  [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads)
  and it will be downloaded and installed automatically.

The WebGUI offers similar functionality for model management.

For advanced users, new command-line options provide additional functionality.
Launching `invokeai` with the argument `--autoconvert <path to directory>` takes
the path to a directory of checkpoint files, automatically converts them into
`diffusers` models and imports them. Each time the script is launched, the
directory will be scanned for new checkpoint files to be loaded. Alternatively,
the `--ckpt_convert` argument will cause any checkpoint or safetensors model
that is already registered with InvokeAI to be converted into a `diffusers`
model on the fly, allowing you to take advantage of future diffusers-only
features without explicitly converting the model and saving it to disk.
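For example, to scan and convert a directory of checkpoints at every launch (the path below is a placeholder):

```sh
invokeai --autoconvert /path/to/my/checkpoints
```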
Please see
[INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/)
for more information on model management in both the command-line and Web
interfaces.

#### Support for the `XFormers` Memory-Efficient Crossattention Package

On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once
installed, the `xformers` package dramatically reduces the memory footprint of
loaded Stable Diffusion models files and modestly increases image generation
speed. `xformers` will be installed and activated automatically if you specify a
CUDA system at install time.

The caveat with using `xformers` is that it introduces slightly
non-deterministic behavior, and images generated using the same seed and other
settings will be subtly different between invocations. Generally the changes are
unnoticeable unless you rapidly shift back and forth between images, but to
disable `xformers` and restore fully deterministic behavior, you may launch
InvokeAI using the `--no-xformers` option. This is most conveniently done by
opening the file `invokeai/invokeai.init` with a text editor, and adding the
line `--no-xformers` at the bottom.
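A quick way to append that line from a shell, assuming your InvokeAI runtime directory is `~/invokeai` (adjust the path to your setup):

```sh
echo "--no-xformers" >> ~/invokeai/invokeai.init
```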
#### A Negative Prompt Box in the WebUI

There is now a separate text input box for negative prompts in the WebUI. This
is convenient for stashing frequently-used negative prompts ("mangled limbs, bad
anatomy"). The `[negative prompt]` syntax continues to work in the main prompt
box as well.

To see exactly how your prompts are being parsed, launch `invokeai` with the
`--log_tokenization` option. The console window will then display the
tokenization process for both positive and negative prompts.

#### Model Merging

Version 2.3.0 offers an intuitive user interface for merging up to three Stable
Diffusion models. Model merging allows you to mix the behavior of models to
achieve very interesting effects. To use this, each of the models must already
be imported into InvokeAI and saved in `diffusers` format, then launch the
merger using a new menu item in the InvokeAI launcher script (`invoke.sh`,
`invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You
will be prompted to select the models to merge, the proportions in which to mix
them, and the mixing algorithm. The script will create a new merged `diffusers`
model and import it into InvokeAI for your use.

See
[MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/)
for more details.

#### Textual Inversion Training

Textual Inversion (TI) is a technique for training a Stable Diffusion model to
emit a particular subject or style when triggered by a keyword phrase. You can
perform TI training by placing a small number of images of the subject or style
in a directory, and choosing a distinctive trigger phrase, such as
"pointillist-style". After successful training, the subject or style will be
activated by including `<pointillist-style>` in your prompt.

Previous versions of InvokeAI were able to perform TI, but it required using a
command-line script with dozens of obscure command-line arguments. Version 2.3.0
features an intuitive TI frontend that will build a TI model on top of any
`diffusers` model. To access training you can launch from a new item in the
launcher script or from the command line using `invokeai-ti --gui`.

See
[TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
for further details.
#### A New Installer Experience

The InvokeAI installer has been upgraded in order to provide a smoother and
hopefully more glitch-free experience. In addition, InvokeAI is now packaged as
a PyPi project, allowing developers and power-users to install InvokeAI with the
command `pip install InvokeAI --use-pep517`. Please see
[Installation](#installation) for details.

Developers should be aware that the `pip` installation procedure has been
simplified and that the `conda` method is no longer supported at all.
Accordingly, the `environments_and_requirements` directory has been deleted from
the repository.

#### Command-line name changes

All of InvokeAI's functionality, including the WebUI, command-line interface,
textual inversion training and model merging, can be accessed from the
`invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been
expanded to add the new functionality. For the convenience of developers and
power users, we have normalized the names of the InvokeAI command-line scripts:

- `invokeai` -- Command-line client
- `invokeai --web` -- Web GUI
- `invokeai-merge --gui` -- Model merging script with graphical front end
- `invokeai-ti --gui` -- Textual inversion script with graphical front end
- `invokeai-configure` -- Configuration tool for initializing the `invokeai`
  directory and selecting popular starter models.

For backward compatibility, the old command names are also recognized, including
`invoke.py` and `configure-invokeai.py`. However, these are deprecated and will
eventually be removed.

Developers should be aware that the locations of the scripts' source code have
been moved. The new locations are:

- `invokeai` => `ldm/invoke/CLI.py`
- `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
- `invokeai-ti` => `ldm/invoke/training/textual_inversion.py`
- `invokeai-merge` => `ldm/invoke/merge_diffusers`
Developers are strongly encouraged to perform an "editable" install of InvokeAI
using `pip install -e . --use-pep517` in the Git repository, and then to call
the scripts using their 2.3.0 names, rather than executing the scripts directly.
Developers should also be aware that several important data files have been
relocated into a new directory named `invokeai`. This includes the WebGUI's
`frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used
by the installer to select starter models. Eventually all InvokeAI modules will
be in subdirectories of `invokeai`.

Please see
[2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0)
for further details. For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting

Please check out our
**[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)**
to get solutions for common installation problems and other issues.

## :octicons-repo-push-24: Contributing

@@ -265,8 +388,8 @@ thank them for their time, hard work and effort.

For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.

Original portions of the software are Copyright (c) 2022-23 by
[The InvokeAI Team](https://github.com/invoke-ai).

## :octicons-book-24: Further Reading

View File

@@ -221,7 +221,10 @@ experimental versions later.

- ***NSFW checker***

  If checked, InvokeAI will test images for potential sexual content
  and blur them out if found. Note that the NSFW checker consumes
  an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
  by most image models. If you have a low VRAM GPU (4-6 GB), you
  can reduce out of memory errors by disabling the checker.

- ***HuggingFace Access Token***

  InvokeAI has the ability to download embedded styles and subjects

@@ -440,6 +443,52 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.
### Out of Memory Issues
The models are large, VRAM is expensive, and you may find yourself
faced with Out of Memory errors when generating images. Here are some
tips to reduce the problem:
* **4 GB of VRAM**
This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
and derived models, provided that you **disable** the NSFW checker. To
disable the filter, do one of the following:
* Select option (6) "_change InvokeAI startup options_" from the
launcher. This will bring up the console-based startup settings
dialogue and allow you to unselect the "NSFW Checker" option.
* Start the startup settings dialogue directly by running
`invokeai-configure --skip-sd-weights --skip-support-models`
from the command line.
* Find the `invokeai.init` initialization file in the InvokeAI root
directory, open it in a text editor, and change `--nsfw_checker`
to `--no-nsfw_checker`
If you are on a CUDA system, you can realize significant memory
savings by activating the `xformers` library as described above. The
downside is `xformers` introduces non-deterministic behavior, such
that images generated with exactly the same prompt and settings will
be slightly different from each other. See above for more information.
* **6 GB of VRAM**
This is a border case. Using the SD 1.5 series you should be able to
generate images up to 640x640 with the NSFW checker enabled, and up to
1024x1024 with it disabled and `xformers` activated.
If you run into persistent memory issues, there are several
environment variables that you can set before launching InvokeAI to
alter how the PyTorch machine learning library manages memory. See
https://pytorch.org/docs/stable/notes/cuda.html#memory-management for
a list of these tweaks.
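For example, one setting that is often worth trying caps the size of
the CUDA allocator's cached blocks in order to reduce memory
fragmentation. The value below is only a starting point, not a
recommendation; tune it for your GPU:

```bash
# read by the PyTorch CUDA allocator; set before launching InvokeAI
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
invokeai --web   # or however you normally launch InvokeAI
```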
* **12 GB of VRAM**
This should be sufficient to generate larger images up to about
1280x1280. If you wish to push further, consider activating
`xformers`.
### Other Problems
If you run into problems during or after installation, the InvokeAI team is
@@ -43,25 +43,31 @@ InvokeAI comes with support for a good set of starter models. You'll
find them listed in the master models file
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
subset that is currently installed is found in
`configs/models.yaml`. As of v2.3.1, the list of starter models is:
|Model Name | HuggingFace Repo ID | Description | URL |
|---------- | ---------- | ----------- | --- |
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-inpainting|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-inpainting |
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |
Note that these files are covered by an "Ethical AI" license which
forbids certain uses. When you initially download them, you are asked
to accept the license terms. In addition, some of these models carry
additional license terms that limit their use in commercial
applications or on public servers. Be sure to familiarize yourself
with the model terms by visiting the URLs in the table above.
## Community-Contributed Models
@@ -1,5 +0,0 @@
mkdocs
mkdocs-material>=8, <9
mkdocs-git-revision-date-localized-plugin
mkdocs-redirects==1.2.0
@@ -6,53 +6,83 @@ stable-diffusion-1.5:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: True
  default: True
sd-inpainting-1.5:
  description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
  repo_id: runwayml/stable-diffusion-inpainting
  format: diffusers
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: True
stable-diffusion-2.1-768:
  description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
  repo_id: stabilityai/stable-diffusion-2-1
  format: diffusers
  recommended: True
stable-diffusion-2.1-base:
  description: Stable Diffusion version 2.1 diffusers model, trained on 512 pixel images (5.21 GB)
  repo_id: stabilityai/stable-diffusion-2-1-base
  format: diffusers
  recommended: False
sd-inpainting-2.0:
  description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
  repo_id: stabilityai/stable-diffusion-2-inpainting
  format: diffusers
  recommended: False
analog-diffusion-1.0:
  description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
  repo_id: wavymulder/Analog-Diffusion
  format: diffusers
  recommended: false
deliberate-1.0:
  description: Versatile model that produces detailed images up to 768px (4.27 GB)
  format: diffusers
  repo_id: XpucT/Deliberate
  recommended: False
d&d-diffusion-1.0:
  description: Dungeons & Dragons characters (2.13 GB)
  format: diffusers
  repo_id: 0xJustin/Dungeons-and-Diffusion
  recommended: False
dreamlike-photoreal-2.0:
  description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
  format: diffusers
  repo_id: dreamlike-art/dreamlike-photoreal-2.0
  recommended: False
inkpunk-1.0:
  description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
  format: diffusers
  repo_id: Envvi/Inkpunk-Diffusion
  recommended: False
openjourney-4.0:
  description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
  format: diffusers
  repo_id: prompthero/openjourney
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: False
portrait-plus-1.0:
  description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
  format: diffusers
  repo_id: wavymulder/portraitplus
  recommended: False
seek-art-mega-1.0:
  description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
  repo_id: coreco/seek.art_MEGA
  format: diffusers
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: False
trinart-2.0:
  description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
  repo_id: naclbit/trinart_stable_diffusion_v2
  format: diffusers
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: False
waifu-diffusion-1.4:
  description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
  repo_id: hakurei/waifu-diffusion
  format: diffusers
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: False
@@ -5,7 +5,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-c33fa9da.js"></script>
<link rel="stylesheet" href="./assets/index-14cb2922.css">
</head>
@@ -63,7 +63,8 @@
"statusConvertingModel": "Converting Model",
"statusModelConverted": "Model Converted",
"statusMergingModels": "Merging Models",
"statusMergedModels": "Models Merged",
"pinOptionsPanel": "Pin Options Panel"
},
"gallery": {
"generations": "Generations",
@@ -393,7 +394,9 @@
"modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
"inverseSigmoid": "Inverse Sigmoid",
"sigmoid": "Sigmoid",
"weightedSum": "Weighted Sum",
"none": "none",
"addDifference": "Add Difference"
},
"parameters": {
"general": "General",
@@ -15,7 +15,7 @@
"langSpanish": "Español",
"nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
"postProcessing": "Post-procesamiento",
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador.",
"postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
"postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
"training": "Entrenamiento",
@@ -44,7 +44,26 @@
"statusUpscaling": "Aumentando Tamaño",
"statusUpscalingESRGAN": "Aumentando Tamaño (ESRGAN)",
"statusLoadingModel": "Cargando Modelo",
"statusModelChanged": "Modelo cambiado",
"statusMergedModels": "Modelos combinados",
"githubLabel": "Github",
"discordLabel": "Discord",
"langEnglish": "Inglés",
"langDutch": "Holandés",
"langFrench": "Francés",
"langGerman": "Alemán",
"langItalian": "Italiano",
"langArabic": "Árabe",
"langJapanese": "Japones",
"langPolish": "Polaco",
"langBrPortuguese": "Portugués brasileño",
"langRussian": "Ruso",
"langSimplifiedChinese": "Chino simplificado",
"langUkranian": "Ucraniano",
"back": "Atrás",
"statusConvertingModel": "Convertir el modelo",
"statusModelConverted": "Modelo adaptado",
"statusMergingModels": "Fusionar modelos"
},
"gallery": {
"generations": "Generaciones",
@@ -284,16 +303,16 @@
"nameValidationMsg": "Introduce un nombre para tu modelo",
"description": "Descripción",
"descriptionValidationMsg": "Introduce una descripción para tu modelo",
"config": "Configurar",
"configValidationMsg": "Ruta del archivo de configuración del modelo.",
"modelLocation": "Ubicación del Modelo",
"modelLocationValidationMsg": "Ruta del archivo de modelo.",
"vaeLocation": "Ubicación VAE",
"vaeLocationValidationMsg": "Ruta del archivo VAE.",
"width": "Ancho",
"widthValidationMsg": "Ancho predeterminado de tu modelo.",
"height": "Alto",
"heightValidationMsg": "Alto predeterminado de tu modelo.",
"addModel": "Añadir Modelo",
"updateModel": "Actualizar Modelo",
"availableModels": "Modelos disponibles",
@@ -320,7 +339,61 @@
"deleteModel": "Eliminar Modelo",
"deleteConfig": "Eliminar Configuración",
"deleteMsg1": "¿Estás seguro de querer eliminar esta entrada de modelo de InvokeAI?",
"deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas.",
"safetensorModels": "SafeTensors",
"addDiffuserModel": "Añadir difusores",
"inpainting": "v1 Repintado",
"repoIDValidationMsg": "Repositorio en línea de tu modelo",
"checkpointModels": "Puntos de control",
"convertToDiffusersHelpText4": "Este proceso se realiza una sola vez. Puede tardar entre 30 y 60 segundos dependiendo de las especificaciones de tu ordenador.",
"diffusersModels": "Difusores",
"addCheckpointModel": "Agregar modelo de punto de control/Modelo Safetensor",
"vaeRepoID": "Identificador del repositorio de VAE",
"vaeRepoIDValidationMsg": "Repositorio en línea de tú VAE",
"formMessageDiffusersModelLocation": "Difusores Modelo Ubicación",
"formMessageDiffusersModelLocationDesc": "Por favor, introduzca al menos uno.",
"formMessageDiffusersVAELocation": "Ubicación VAE",
"formMessageDiffusersVAELocationDesc": "Si no se proporciona, InvokeAI buscará el archivo VAE dentro de la ubicación del modelo indicada anteriormente.",
"convert": "Convertir",
"convertToDiffusers": "Convertir en difusores",
"convertToDiffusersHelpText1": "Este modelo se convertirá al formato 🧨 Difusores.",
"convertToDiffusersHelpText2": "Este proceso sustituirá su entrada del Gestor de Modelos por la versión de Difusores del mismo modelo.",
"convertToDiffusersHelpText3": "Su archivo de puntos de control en el disco NO será borrado ni modificado de ninguna manera. Puede volver a añadir su punto de control al Gestor de Modelos si lo desea.",
"convertToDiffusersHelpText5": "Asegúrese de que dispone de suficiente espacio en disco. Los modelos suelen variar entre 4 GB y 7 GB de tamaño.",
"convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
"convertToDiffusersSaveLocation": "Guardar ubicación",
"v1": "v1",
"v2": "v2",
"statusConverting": "Adaptar",
"modelConverted": "Modelo adaptado",
"sameFolder": "La misma carpeta",
"invokeRoot": "Carpeta InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Ubicación personalizada para guardar",
"merge": "Fusión",
"modelsMerged": "Modelos fusionados",
"mergeModels": "Combinar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"mergedModelName": "Nombre del modelo combinado",
"alpha": "Alfa",
"interpolationType": "Tipo de interpolación",
"mergedModelSaveLocation": "Guardar ubicación",
"mergedModelCustomSaveLocation": "Ruta personalizada",
"invokeAIFolder": "Invocar carpeta de la inteligencia artificial",
"modelMergeHeaderHelp2": "Sólo se pueden fusionar difusores. Si desea fusionar un modelo de punto de control, conviértalo primero en difusores.",
"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.",
"inverseSigmoid": "Sigmoideo inverso",
"weightedSum": "Modelo de suma ponderada",
"sigmoid": "Función sigmoide",
"allModels": "Todos los modelos",
"repo_id": "Identificador del repositorio",
"pathToCustomConfig": "Ruta a la configuración personalizada",
"customConfig": "Configuración personalizada"
},
"parameters": {
"images": "Imágenes",
@@ -380,7 +453,22 @@
"info": "Información",
"deleteImage": "Eliminar Imagen",
"initialImage": "Imagen Inicial",
"showOptionsPanel": "Mostrar panel de opciones",
"symmetry": "Simetría",
"vSymmetryStep": "Paso de simetría V",
"hSymmetryStep": "Paso de simetría H",
"cancel": {
"immediate": "Cancelar inmediatamente",
"schedule": "Cancelar tras la iteración actual",
"isScheduled": "Cancelando",
"setType": "Tipo de cancelación"
},
"copyImage": "Copiar la imagen",
"general": "General",
"negativePrompts": "Preguntas negativas",
"imageToImage": "Imagen a imagen",
"denoisingStrength": "Intensidad de la eliminación del ruido",
"hiresStrength": "Alta resistencia"
},
"settings": {
"models": "Modelos",
@@ -393,7 +481,8 @@
"resetWebUI": "Restablecer interfaz web",
"resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.",
"resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.",
"resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla.",
"useSlidersForAll": "Utilice controles deslizantes para todas las opciones"
},
"toast": {
"tempFoldersEmptied": "Directorio temporal vaciado",
@@ -431,12 +520,12 @@
"feature": {
"prompt": "Este campo tomará todo el texto de entrada, incluidos tanto los términos de contenido como los estilísticos. Si bien se pueden incluir pesos en la solicitud, los comandos/parámetros estándar de línea de comandos no funcionarán.",
"gallery": "Conforme se generan nuevas invocaciones, los archivos del directorio de salida se mostrarán aquí. Las generaciones tienen opciones adicionales para configurar nuevas generaciones.",
"other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. 'Seamless mosaico' creará patrones repetitivos en la salida. 'Alta resolución' es la generación en dos pasos con img2img: use esta configuración cuando desee una imagen más grande y más coherente sin artefactos; puede tomar más tiempo de lo habitual en comparación con txt2img.",
"seed": "Los valores de semilla proporcionan un conjunto inicial de ruido que guían el proceso de eliminación de ruido y se pueden aleatorizar o rellenar con una semilla de una invocación anterior. La función Umbral se puede usar para mitigar resultados indeseables a valores CFG más altos (intente entre 0-10), y Perlin se puede usar para agregar ruido Perlin al proceso de eliminación de ruido. Ambos sirven para agregar variación a sus salidas.",
"variations": "Pruebe una variación con una cantidad entre 0 y 1 para cambiar la imagen de salida para la semilla establecida. Se encuentran variaciones interesantes en la semilla entre 0.1 y 0.3.",
"upscale": "Usando ESRGAN, puede aumentar la resolución de salida sin requerir un ancho/alto más alto en la generación inicial.",
"faceCorrection": "Usando GFPGAN o Codeformer, la corrección de rostros intentará identificar rostros en las salidas y corregir cualquier defecto/anormalidad. Los valores de fuerza más altos aplicarán una presión correctiva más fuerte en las salidas, lo que resultará en rostros más atractivos. Con Codeformer, una mayor fidelidad intentará preservar la imagen original, a expensas de la fuerza de corrección de rostros.",
"imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75",
"boundingBox": "La caja delimitadora es análoga a las configuraciones de Ancho y Alto para Texto a Imagen o Imagen a Imagen. Solo se procesará el área en la caja.",
"seamCorrection": "Controla el manejo de parches visibles que pueden ocurrir cuando se pega una imagen generada de nuevo en el lienzo.",
"infillAndScaling": "Administra los métodos de relleno (utilizados en áreas enmascaradas o borradas del lienzo) y la escala (útil para tamaños de caja delimitadora pequeños)."
@@ -44,7 +44,26 @@
"statusUpscaling": "Redimensionando",
"statusUpscalingESRGAN": "Redimensionando (ESRGAN)",
"statusLoadingModel": "Carregando Modelo",
"statusModelChanged": "Modelo Alterado",
"githubLabel": "Github",
"discordLabel": "Discord",
"langArabic": "Árabe",
"langEnglish": "Inglês",
"langDutch": "Holandês",
"langFrench": "Francês",
"langGerman": "Alemão",
"langItalian": "Italiano",
"langJapanese": "Japonês",
"langPolish": "Polonês",
"langSimplifiedChinese": "Chinês",
"langUkranian": "Ucraniano",
"back": "Voltar",
"statusConvertingModel": "Convertendo Modelo",
"statusModelConverted": "Modelo Convertido",
"statusMergingModels": "Mesclando Modelos",
"statusMergedModels": "Modelos Mesclados",
"langRussian": "Russo",
"langSpanish": "Espanhol"
},
"gallery": {
"generations": "Gerações",
@@ -237,7 +256,7 @@
"desc": "Salva a tela atual na galeria"
},
"copyToClipboard": {
"title": "Copiar para a Área de Transferência",
"desc": "Copia a tela atual para a área de transferência"
},
"downloadImage": {
@@ -284,7 +303,7 @@
"nameValidationMsg": "Insira um nome para o seu modelo",
"description": "Descrição",
"descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
"config": "Configuração",
"configValidationMsg": "Caminho para o arquivo de configuração do seu modelo.",
"modelLocation": "Localização do modelo",
"modelLocationValidationMsg": "Caminho para onde seu modelo está localizado.",
@@ -317,7 +336,52 @@
"deleteModel": "Excluir modelo",
"deleteConfig": "Excluir Config",
"deleteMsg1": "Tem certeza de que deseja excluir esta entrada do modelo de InvokeAI?",
"deleteMsg2": "Isso não vai excluir o arquivo de modelo checkpoint do seu disco. Você pode readicioná-lo, se desejar.",
"checkpointModels": "Checkpoints",
"diffusersModels": "Diffusers",
"safetensorModels": "SafeTensors",
"addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
"addDiffuserModel": "Adicionar Diffusers",
"repo_id": "Repo ID",
"vaeRepoID": "VAE Repo ID",
"vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
"scanAgain": "Digitalize Novamente",
"selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
"noModelsFound": "Nenhum Modelo Encontrado",
"formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
"formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
"formMessageDiffusersVAELocation": "Localização do VAE",
"formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo arquivo VAE dentro do local do modelo.",
"convertToDiffusers": "Converter para Diffusers",
"convertToDiffusersHelpText1": "Este modelo será convertido para o formato 🧨 Diffusers.",
"convertToDiffusersHelpText5": "Por favor, certifique-se de que você tenha espaço suficiente em disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
"convertToDiffusersHelpText6": "Você deseja converter este modelo?",
"convertToDiffusersSaveLocation": "Local para Salvar",
"v1": "v1",
"v2": "v2",
"inpainting": "v1 Inpainting",
"customConfig": "Configuração personalizada",
"pathToCustomConfig": "Caminho para configuração personalizada",
"convertToDiffusersHelpText3": "Seu arquivo de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Você pode adicionar seu ponto de verificação ao Gerenciador de modelos novamente, se desejar.",
"convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, dependendo das especificações do seu computador.",
"merge": "Mesclar",
"modelsMerged": "Modelos mesclados",
"mergeModels": "Mesclar modelos",
"modelOne": "Modelo 1",
"modelTwo": "Modelo 2",
"modelThree": "Modelo 3",
"statusConverting": "Convertendo",
"modelConverted": "Modelo Convertido",
"sameFolder": "Mesma pasta",
"invokeRoot": "Pasta do InvokeAI",
"custom": "Personalizado",
"customSaveLocation": "Local de salvamento personalizado",
"mergedModelName": "Nome do modelo mesclado",
"alpha": "Alpha",
"allModels": "Todos os Modelos",
"repoIDValidationMsg": "Repositório Online do seu Modelo",
"convert": "Converter",
"convertToDiffusersHelpText2": "Este processo irá substituir sua entrada de Gerenciador de Modelos por uma versão Diffusers do mesmo modelo."
},
"parameters": {
"images": "Imagens",
@@ -442,14 +506,14 @@
"move": "Mover",
"resetView": "Resetar Visualização",
"mergeVisible": "Fundir Visível",
"saveToGallery": "Salvar na Galeria",
"copyToClipboard": "Copiar para a Área de Transferência",
"downloadAsImage": "Baixar Como Imagem",
"undo": "Desfazer",
"redo": "Refazer",
"clearCanvas": "Limpar Tela",
"canvasSettings": "Configurações de Tela",
"showIntermediates": "Mostrar Intermediários",
"showGrid": "Mostrar Grade",
"snapToGrid": "Encaixar na Grade",
"darkenOutsideSelection": "Escurecer Seleção Externa",
@@ -0,0 +1 @@
{}
@ -392,7 +392,7 @@ const makeSocketIOListeners = (
addLogEntry({ addLogEntry({
timestamp: dateFormat(new Date(), 'isoDateTime'), timestamp: dateFormat(new Date(), 'isoDateTime'),
message: `${i18n.t( message: `${i18n.t(
'modelmanager:modelAdded' 'modelManager.modelAdded'
)}: ${deleted_model_name}`, )}: ${deleted_model_name}`,
level: 'info', level: 'info',
}) })
@ -400,7 +400,7 @@ const makeSocketIOListeners = (
dispatch( dispatch(
addToast({ addToast({
title: `${i18n.t( title: `${i18n.t(
'modelmanager:modelEntryDeleted' 'modelManager.modelEntryDeleted'
)}: ${deleted_model_name}`, )}: ${deleted_model_name}`,
status: 'success', status: 'success',
duration: 2500, duration: 2500,
@ -424,7 +424,7 @@ const makeSocketIOListeners = (
dispatch( dispatch(
addToast({ addToast({
title: `${i18n.t( title: `${i18n.t(
'modelmanager:modelConverted' 'modelManager.modelConverted'
)}: ${converted_model_name}`, )}: ${converted_model_name}`,
status: 'success', status: 'success',
duration: 2500, duration: 2500,

View File

@ -144,8 +144,8 @@ export const frontendToBackendParameters = (
variationAmount, variationAmount,
width, width,
shouldUseSymmetry, shouldUseSymmetry,
horizontalSymmetryTimePercentage, horizontalSymmetrySteps,
verticalSymmetryTimePercentage, verticalSymmetrySteps,
} = generationState; } = generationState;
const { const {
@ -185,17 +185,17 @@ export const frontendToBackendParameters = (
// Symmetry Settings // Symmetry Settings
if (shouldUseSymmetry) { if (shouldUseSymmetry) {
if (horizontalSymmetryTimePercentage > 0) { if (horizontalSymmetrySteps > 0) {
generationParameters.h_symmetry_time_pct = Math.max( generationParameters.h_symmetry_time_pct = Math.max(
0, 0,
Math.min(1, horizontalSymmetryTimePercentage / steps) Math.min(1, horizontalSymmetrySteps / steps)
); );
} }
if (horizontalSymmetryTimePercentage > 0) { if (verticalSymmetrySteps > 0) {
generationParameters.v_symmetry_time_pct = Math.max( generationParameters.v_symmetry_time_pct = Math.max(
0, 0,
Math.min(1, verticalSymmetryTimePercentage / steps) Math.min(1, verticalSymmetrySteps / steps)
); );
} }
} }
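
The new-side logic above also fixes a copy-paste bug: the vertical branch previously re-tested horizontalSymmetryTimePercentage, so v_symmetry_time_pct could never be set independently of the horizontal slider. For reference, a minimal Python sketch of the conversion the frontend now performs before submitting (function and variable names are illustrative; only h_symmetry_time_pct / v_symmetry_time_pct come from the diff):

def symmetry_params(h_steps: int, v_steps: int, steps: int) -> dict:
    # convert absolute step counts into the 0..1 fraction of the
    # denoising schedule that the backend expects
    def to_pct(s: int) -> float:
        return max(0.0, min(1.0, s / steps))
    params = {}
    if h_steps > 0:
        params["h_symmetry_time_pct"] = to_pct(h_steps)
    if v_steps > 0:
        params["v_symmetry_time_pct"] = to_pct(v_steps)
    return params

print(symmetry_params(10, 25, 50))  # {'h_symmetry_time_pct': 0.2, 'v_symmetry_time_pct': 0.5}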

View File

@ -109,7 +109,7 @@ const IAICanvasStatusText = () => {
color: boundingBoxColor, color: boundingBoxColor,
}} }}
>{`${t( >{`${t(
'unifiedcanvas:boundingBox' 'unifiedCanvas.boundingBox'
)}: ${boundingBoxDimensionsString}`}</div> )}: ${boundingBoxDimensionsString}`}</div>
)} )}
{shouldShowScaledBoundingBox && ( {shouldShowScaledBoundingBox && (
@ -118,19 +118,19 @@ const IAICanvasStatusText = () => {
color: boundingBoxColor, color: boundingBoxColor,
}} }}
>{`${t( >{`${t(
'unifiedcanvas:scaledBoundingBox' 'unifiedCanvas.scaledBoundingBox'
)}: ${scaledBoundingBoxDimensionsString}`}</div> )}: ${scaledBoundingBoxDimensionsString}`}</div>
)} )}
{shouldShowCanvasDebugInfo && ( {shouldShowCanvasDebugInfo && (
<> <>
<div>{`${t( <div>{`${t(
'unifiedcanvas:boundingBoxPosition' 'unifiedCanvas.boundingBoxPosition'
)}: ${boundingBoxCoordinatesString}`}</div> )}: ${boundingBoxCoordinatesString}`}</div>
<div>{`${t( <div>{`${t(
'unifiedcanvas:canvasDimensions' 'unifiedCanvas.canvasDimensions'
)}: ${canvasDimensionsString}`}</div> )}: ${canvasDimensionsString}`}</div>
<div>{`${t( <div>{`${t(
'unifiedcanvas:canvasPosition' 'unifiedCanvas.canvasPosition'
)}: ${canvasCoordinatesString}`}</div> )}: ${canvasCoordinatesString}`}</div>
<IAICanvasStatusTextCursorPos /> <IAICanvasStatusTextCursorPos />
</> </>

View File

@ -34,7 +34,7 @@ export default function IAICanvasStatusTextCursorPos() {
return ( return (
<div>{`${t( <div>{`${t(
'unifiedcanvas:cursorPosition' 'unifiedCanvas.cursorPosition'
)}: ${cursorCoordinatesString}`}</div> )}: ${cursorCoordinatesString}`}</div>
); );
} }

View File

@ -2,18 +2,18 @@ import { RootState } from 'app/store';
import { useAppDispatch, useAppSelector } from 'app/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAISlider from 'common/components/IAISlider'; import IAISlider from 'common/components/IAISlider';
import { import {
setHorizontalSymmetryTimePercentage, setHorizontalSymmetrySteps,
setVerticalSymmetryTimePercentage, setVerticalSymmetrySteps,
} from 'features/parameters/store/generationSlice'; } from 'features/parameters/store/generationSlice';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
export default function SymmetrySettings() { export default function SymmetrySettings() {
const horizontalSymmetryTimePercentage = useAppSelector( const horizontalSymmetrySteps = useAppSelector(
(state: RootState) => state.generation.horizontalSymmetryTimePercentage (state: RootState) => state.generation.horizontalSymmetrySteps
); );
const verticalSymmetryTimePercentage = useAppSelector( const verticalSymmetrySteps = useAppSelector(
(state: RootState) => state.generation.verticalSymmetryTimePercentage (state: RootState) => state.generation.verticalSymmetrySteps
); );
const steps = useAppSelector((state: RootState) => state.generation.steps); const steps = useAppSelector((state: RootState) => state.generation.steps);
@ -26,28 +26,28 @@ export default function SymmetrySettings() {
<> <>
<IAISlider <IAISlider
label={t('parameters.hSymmetryStep')} label={t('parameters.hSymmetryStep')}
value={horizontalSymmetryTimePercentage} value={horizontalSymmetrySteps}
onChange={(v) => dispatch(setHorizontalSymmetryTimePercentage(v))} onChange={(v) => dispatch(setHorizontalSymmetrySteps(v))}
min={0} min={0}
max={steps} max={steps}
step={1} step={1}
withInput withInput
withSliderMarks withSliderMarks
withReset withReset
handleReset={() => dispatch(setHorizontalSymmetryTimePercentage(0))} handleReset={() => dispatch(setHorizontalSymmetrySteps(0))}
sliderMarkRightOffset={-6} sliderMarkRightOffset={-6}
></IAISlider> ></IAISlider>
<IAISlider <IAISlider
label={t('parameters.vSymmetryStep')} label={t('parameters.vSymmetryStep')}
value={verticalSymmetryTimePercentage} value={verticalSymmetrySteps}
onChange={(v) => dispatch(setVerticalSymmetryTimePercentage(v))} onChange={(v) => dispatch(setVerticalSymmetrySteps(v))}
min={0} min={0}
max={steps} max={steps}
step={1} step={1}
withInput withInput
withSliderMarks withSliderMarks
withReset withReset
handleReset={() => dispatch(setVerticalSymmetryTimePercentage(0))} handleReset={() => dispatch(setVerticalSymmetrySteps(0))}
sliderMarkRightOffset={-6} sliderMarkRightOffset={-6}
></IAISlider> ></IAISlider>
</> </>

View File

@ -3,7 +3,10 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import IAINumberInput from 'common/components/IAINumberInput'; import IAINumberInput from 'common/components/IAINumberInput';
import IAISlider from 'common/components/IAISlider'; import IAISlider from 'common/components/IAISlider';
import { setSteps } from 'features/parameters/store/generationSlice'; import {
clampSymmetrySteps,
setSteps,
} from 'features/parameters/store/generationSlice';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
export default function MainSteps() { export default function MainSteps() {
@ -14,7 +17,13 @@ export default function MainSteps() {
); );
const { t } = useTranslation(); const { t } = useTranslation();
const handleChangeSteps = (v: number) => dispatch(setSteps(v)); const handleChangeSteps = (v: number) => {
dispatch(setSteps(v));
};
const handleBlur = () => {
dispatch(clampSymmetrySteps());
};
return shouldUseSliders ? ( return shouldUseSliders ? (
<IAISlider <IAISlider
@ -41,6 +50,7 @@ export default function MainSteps() {
width="auto" width="auto"
styleClass="main-settings-block" styleClass="main-settings-block"
textAlign="center" textAlign="center"
onBlur={handleBlur}
/> />
); );
} }

View File

@ -5,6 +5,7 @@ import IAIButton, { IAIButtonProps } from 'common/components/IAIButton';
import IAIIconButton, { import IAIIconButton, {
IAIIconButtonProps, IAIIconButtonProps,
} from 'common/components/IAIIconButton'; } from 'common/components/IAIIconButton';
import { clampSymmetrySteps } from 'features/parameters/store/generationSlice';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors'; import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { useHotkeys } from 'react-hotkeys-hook'; import { useHotkeys } from 'react-hotkeys-hook';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
@ -30,6 +31,7 @@ export default function InvokeButton(props: InvokeButton) {
useHotkeys( useHotkeys(
['ctrl+enter', 'meta+enter'], ['ctrl+enter', 'meta+enter'],
() => { () => {
dispatch(clampSymmetrySteps());
dispatch(generateImage(activeTabName)); dispatch(generateImage(activeTabName));
}, },
{ {

View File

@ -4,6 +4,7 @@ import * as InvokeAI from 'app/invokeai';
import { getPromptAndNegative } from 'common/util/getPromptAndNegative'; import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
import promptToString from 'common/util/promptToString'; import promptToString from 'common/util/promptToString';
import { seedWeightsToString } from 'common/util/seedWeightPairs'; import { seedWeightsToString } from 'common/util/seedWeightPairs';
import { clamp } from 'lodash';
export interface GenerationState { export interface GenerationState {
cfgScale: number; cfgScale: number;
@ -33,8 +34,8 @@ export interface GenerationState {
variationAmount: number; variationAmount: number;
width: number; width: number;
shouldUseSymmetry: boolean; shouldUseSymmetry: boolean;
horizontalSymmetryTimePercentage: number; horizontalSymmetrySteps: number;
verticalSymmetryTimePercentage: number; verticalSymmetrySteps: number;
} }
const initialGenerationState: GenerationState = { const initialGenerationState: GenerationState = {
@ -64,8 +65,8 @@ const initialGenerationState: GenerationState = {
variationAmount: 0.1, variationAmount: 0.1,
width: 512, width: 512,
shouldUseSymmetry: false, shouldUseSymmetry: false,
horizontalSymmetryTimePercentage: 0, horizontalSymmetrySteps: 0,
verticalSymmetryTimePercentage: 0, verticalSymmetrySteps: 0,
}; };
const initialState: GenerationState = initialGenerationState; const initialState: GenerationState = initialGenerationState;
@ -99,6 +100,18 @@ export const generationSlice = createSlice({
setSteps: (state, action: PayloadAction<number>) => { setSteps: (state, action: PayloadAction<number>) => {
state.steps = action.payload; state.steps = action.payload;
}, },
clampSymmetrySteps: (state) => {
state.horizontalSymmetrySteps = clamp(
state.horizontalSymmetrySteps,
0,
state.steps
);
state.verticalSymmetrySteps = clamp(
state.verticalSymmetrySteps,
0,
state.steps
);
},
setCfgScale: (state, action: PayloadAction<number>) => { setCfgScale: (state, action: PayloadAction<number>) => {
state.cfgScale = action.payload; state.cfgScale = action.payload;
}, },
@ -334,22 +347,17 @@ export const generationSlice = createSlice({
setShouldUseSymmetry: (state, action: PayloadAction<boolean>) => { setShouldUseSymmetry: (state, action: PayloadAction<boolean>) => {
state.shouldUseSymmetry = action.payload; state.shouldUseSymmetry = action.payload;
}, },
setHorizontalSymmetryTimePercentage: ( setHorizontalSymmetrySteps: (state, action: PayloadAction<number>) => {
state, state.horizontalSymmetrySteps = action.payload;
action: PayloadAction<number>
) => {
state.horizontalSymmetryTimePercentage = action.payload;
}, },
setVerticalSymmetryTimePercentage: ( setVerticalSymmetrySteps: (state, action: PayloadAction<number>) => {
state, state.verticalSymmetrySteps = action.payload;
action: PayloadAction<number>
) => {
state.verticalSymmetryTimePercentage = action.payload;
}, },
}, },
}); });
export const { export const {
clampSymmetrySteps,
clearInitialImage, clearInitialImage,
resetParametersState, resetParametersState,
resetSeed, resetSeed,
@ -384,8 +392,8 @@ export const {
setVariationAmount, setVariationAmount,
setWidth, setWidth,
setShouldUseSymmetry, setShouldUseSymmetry,
setHorizontalSymmetryTimePercentage, setHorizontalSymmetrySteps,
setVerticalSymmetryTimePercentage, setVerticalSymmetrySteps,
} = generationSlice.actions; } = generationSlice.actions;
export default generationSlice.reducer; export default generationSlice.reducer;
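
The clampSymmetrySteps reducer is the store-side half of the rename: now that the sliders hold absolute step counts, lowering steps can leave them out of range, so the invoke hotkey and the steps field's blur handler both dispatch a clamp. A minimal sketch of the invariant in Python, with lodash's clamp spelled out (names are illustrative):

def clamp(value: int, lower: int, upper: int) -> int:
    # lodash-style clamp: constrain value to the inclusive [lower, upper] range
    return max(lower, min(value, upper))

steps = 20
print(clamp(35, 0, steps))  # 20 -- horizontal slider pulled back into range
print(clamp(-3, 0, steps))  # 0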

View File

@ -57,19 +57,19 @@ export default function MergeModels() {
const [modelMergeForce, setModelMergeForce] = useState<boolean>(false); const [modelMergeForce, setModelMergeForce] = useState<boolean>(false);
const modelOneList = Object.keys(diffusersModels).filter((model) => { const modelOneList = Object.keys(diffusersModels).filter(
if (model !== modelTwo && model !== modelThree) return model; (model) => model !== modelTwo && model !== modelThree
}); );
const modelTwoList = Object.keys(diffusersModels).filter((model) => { const modelTwoList = Object.keys(diffusersModels).filter(
if (model !== modelOne && model !== modelThree) return model; (model) => model !== modelOne && model !== modelThree
}); );
const modelThreeList = [ const modelThreeList = [
'none', { key: t('modelManager.none'), value: 'none' },
...Object.keys(diffusersModels).filter((model) => { ...Object.keys(diffusersModels)
if (model !== modelOne && model !== modelTwo) return model; .filter((model) => model !== modelOne && model !== modelTwo)
}), .map((model) => ({ key: model, value: model })),
]; ];
const isProcessing = useAppSelector( const isProcessing = useAppSelector(
@ -209,18 +209,22 @@ export default function MergeModels() {
<Flex columnGap={4}> <Flex columnGap={4}>
{modelThree === 'none' ? ( {modelThree === 'none' ? (
<> <>
<Radio value="weighted_sum">weighted_sum</Radio> <Radio value="weighted_sum">
<Radio value="sigmoid">sigmoid</Radio> {t('modelManager.weightedSum')}
<Radio value="inv_sigmoid">inv_sigmoid</Radio> </Radio>
<Radio value="sigmoid">{t('modelManager.sigmoid')}</Radio>
<Radio value="inv_sigmoid">
{t('modelManager.inverseSigmoid')}
</Radio>
</> </>
) : ( ) : (
<Radio value="add_difference"> <Radio value="add_difference">
<Tooltip <Tooltip
label={t( label={t(
'modelmanager:modelMergeInterpAddDifferenceHelp' 'modelManager.modelMergeInterpAddDifferenceHelp'
)} )}
> >
add_difference {t('modelManager.addDifference')}
</Tooltip> </Tooltip>
</Radio> </Radio>
)} )}

View File

@ -18,6 +18,7 @@ import { setParametersPanelScrollPosition } from 'features/ui/store/uiSlice';
import InvokeAILogo from 'assets/images/logo.png'; import InvokeAILogo from 'assets/images/logo.png';
import { isEqual } from 'lodash'; import { isEqual } from 'lodash';
import { uiSelector } from '../store/uiSelectors'; import { uiSelector } from '../store/uiSelectors';
import { useTranslation } from 'react-i18next';
type Props = { children: ReactNode }; type Props = { children: ReactNode };
@ -60,6 +61,8 @@ const InvokeOptionsPanel = (props: Props) => {
const { children } = props; const { children } = props;
const { t } = useTranslation();
// Hotkeys // Hotkeys
useHotkeys( useHotkeys(
'o', 'o',
@ -176,7 +179,7 @@ const InvokeOptionsPanel = (props: Props) => {
} }
}} }}
> >
<Tooltip label="Pin Options Panel"> <Tooltip label={t('common.pinOptionsPanel')}>
<div <div
className="parameters-panel-pin-button" className="parameters-panel-pin-button"
data-selected={shouldPinParametersPanel} data-selected={shouldPinParametersPanel}

View File

@ -1,11 +1,16 @@
import 'i18next'; import 'i18next';
import en from '../public/locales/en.json';
declare module 'i18next' { declare module 'i18next' {
// Extend CustomTypeOptions // Extend CustomTypeOptions
interface CustomTypeOptions { interface CustomTypeOptions {
// Setting Default Namespace As English // Setting Default Namespace As English
defaultNS: 'en'; defaultNS: 'en';
// Custom Types For Resources // Custom Types For Resources
resources: {
en: typeof en;
};
// Never Return Null // Never Return Null
returnNull: false; returnNull: false;
} }

File diff suppressed because one or more lines are too long

View File

@ -650,6 +650,8 @@ class Generate:
def clear_cuda_cache(self): def clear_cuda_cache(self):
if self._has_cuda(): if self._has_cuda():
self.gather_cuda_stats() self.gather_cuda_stats()
# Run garbage collection prior to emptying the CUDA cache
gc.collect()
torch.cuda.empty_cache() torch.cuda.empty_cache()
def clear_cuda_stats(self): def clear_cuda_stats(self):
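
Calling gc.collect() first matters because torch.cuda.empty_cache() can only return cached blocks whose tensors are no longer referenced; tensors kept alive by reference cycles hold their allocations until the Python garbage collector runs. A minimal standalone sketch of the pattern (assumes, as the diff does, that gc is imported at module level):

import gc
import torch

def clear_cuda_cache() -> None:
    if torch.cuda.is_available():
        # release unreachable Python objects first so their CUDA tensors
        # are actually freed, then hand the cached blocks back to the driver
        gc.collect()
        torch.cuda.empty_cache()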

View File

@ -625,7 +625,7 @@ def set_default_output_dir(opt: Args, completer: Completer):
completer.set_default_dir(opt.outdir) completer.set_default_dir(opt.outdir)
def import_model(model_path: str, gen, opt, completer, convert=False) -> str: def import_model(model_path: str, gen, opt, completer, convert=False):
""" """
model_path can be (1) a URL to a .ckpt file; (2) a local .ckpt file path; model_path can be (1) a URL to a .ckpt file; (2) a local .ckpt file path;
(3) a huggingface repository id; or (4) a local directory containing a (3) a huggingface repository id; or (4) a local directory containing a
@ -679,7 +679,7 @@ def _verify_load(model_name: str, gen) -> bool:
current_model = gen.model_name current_model = gen.model_name
try: try:
if not gen.set_model(model_name): if not gen.set_model(model_name):
return False return
except Exception as e: except Exception as e:
print(f"** model failed to load: {str(e)}") print(f"** model failed to load: {str(e)}")
print( print(
@ -706,7 +706,7 @@ def _get_model_name_and_desc(
) )
return model_name, model_description return model_name, model_description
def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer) -> str: def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
model_name_or_path = model_name_or_path.replace("\\", "/") # windows model_name_or_path = model_name_or_path.replace("\\", "/") # windows
manager = gen.model_manager manager = gen.model_manager
ckpt_path = None ckpt_path = None
@ -740,19 +740,14 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer) ->
) )
else: else:
try: try:
model_name = import_model(model_name_or_path, gen, opt, completer, convert=True) import_model(model_name_or_path, gen, opt, completer, convert=True)
except KeyboardInterrupt: except KeyboardInterrupt:
return return
if not model_name:
print("** Conversion failed. Aborting.")
return
manager.commit(opt.conf) manager.commit(opt.conf)
if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False): if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
ckpt_path.unlink(missing_ok=True) ckpt_path.unlink(missing_ok=True)
print(f"{ckpt_path} deleted") print(f"{ckpt_path} deleted")
return model_name
def del_config(model_name: str, gen, opt, completer): def del_config(model_name: str, gen, opt, completer):

View File

@ -1 +1 @@
__version__='2.3.1+rc3' __version__='2.3.1.post2'

View File

@ -17,16 +17,15 @@
# Original file at: https://github.com/huggingface/diffusers/blob/main/scripts/convert_ldm_original_checkpoint_to_diffusers.py # Original file at: https://github.com/huggingface/diffusers/blob/main/scripts/convert_ldm_original_checkpoint_to_diffusers.py
""" Conversion script for the LDM checkpoints. """ """ Conversion script for the LDM checkpoints. """
import os
import re import re
import torch import torch
import warnings import warnings
from pathlib import Path from pathlib import Path
from ldm.invoke.globals import ( from ldm.invoke.globals import (
Globals,
global_cache_dir, global_cache_dir,
global_config_dir, global_config_dir,
) )
from ldm.invoke.model_manager import ModelManager, SDLegacyType
from safetensors.torch import load_file from safetensors.torch import load_file
from typing import Union from typing import Union
@ -760,7 +759,12 @@ def convert_open_clip_checkpoint(checkpoint):
text_model_dict = {} text_model_dict = {}
d_model = int(checkpoint["cond_stage_model.model.text_projection"].shape[0]) if 'cond_stage_model.model.text_projection' in keys:
d_model = int(checkpoint["cond_stage_model.model.text_projection"].shape[0])
elif 'cond_stage_model.model.ln_final.bias' in keys:
d_model = int(checkpoint['cond_stage_model.model.ln_final.bias'].shape[0])
else:
raise KeyError('Neither "cond_stage_model.model.text_projection" nor "cond_stage_model.model.ln_final.bias" found in model')
text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids") text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids")
@ -856,20 +860,23 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
upcast_attention = False upcast_attention = False
if original_config_file is None: if original_config_file is None:
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" model_type = ModelManager.probe_model_type(checkpoint)
if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: if model_type == SDLegacyType.V2:
original_config_file = global_config_dir() / 'stable-diffusion' / 'v2-inference-v.yaml' original_config_file = global_config_dir() / 'stable-diffusion' / 'v2-inference-v.yaml'
if global_step == 110000: if global_step == 110000:
# v2.1 needs to upcast attention # v2.1 needs to upcast attention
upcast_attention = True upcast_attention = True
elif str(checkpoint_path).lower().find('inpaint') >= 0: # brittle - please pass original_config_file parameter!
print(f' | checkpoint has "inpaint" in name, assuming an inpainting model') elif model_type == SDLegacyType.V1_INPAINT:
original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inpainting-inference.yaml' original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inpainting-inference.yaml'
else:
elif model_type == SDLegacyType.V1:
original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inference.yaml' original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inference.yaml'
else:
raise Exception('Unknown checkpoint type')
original_config = OmegaConf.load(original_config_file) original_config = OmegaConf.load(original_config_file)
if num_in_channels is not None: if num_in_channels is not None:
@ -960,7 +967,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
text_model = convert_open_clip_checkpoint(checkpoint) text_model = convert_open_clip_checkpoint(checkpoint)
tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2", tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2",
subfolder="tokenizer", subfolder="tokenizer",
cache_dir=global_cache_dir('diffusers') cache_dir=cache_dir,
) )
pipe = pipeline_class( pipe = pipeline_class(
vae=vae, vae=vae,
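
The new d_model lookup falls back to ln_final.bias when the text_projection weight is missing, since both tensors carry the text-encoder width (1024 for SD-2's OpenCLIP model). A standalone sketch of the same probe over a plain state dict (key names are the ones in the diff; the demo tensor is illustrative):

import torch

def detect_text_encoder_width(state_dict: dict) -> int:
    if "cond_stage_model.model.text_projection" in state_dict:
        return int(state_dict["cond_stage_model.model.text_projection"].shape[0])
    if "cond_stage_model.model.ln_final.bias" in state_dict:
        return int(state_dict["cond_stage_model.model.ln_final.bias"].shape[0])
    raise KeyError("checkpoint has neither text_projection nor ln_final.bias")

sd = {"cond_stage_model.model.ln_final.bias": torch.zeros(1024)}
print(detect_text_encoder_width(sd))  # 1024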

View File

@ -88,7 +88,7 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
return positive_prompt, negative_prompt return positive_prompt, negative_prompt
def get_max_token_count(tokenizer, prompt: FlattenedPrompt|Blend, truncate_if_too_long=True) -> int: def get_max_token_count(tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=True) -> int:
if type(prompt) is Blend: if type(prompt) is Blend:
blend: Blend = prompt blend: Blend = prompt
return max([get_max_token_count(tokenizer, c, truncate_if_too_long) for c in blend.prompts]) return max([get_max_token_count(tokenizer, c, truncate_if_too_long) for c in blend.prompts])
@ -129,8 +129,8 @@ def split_prompt_to_positive_and_negative(prompt_string_uncleaned):
return prompt_string_cleaned, unconditioned_words return prompt_string_cleaned, unconditioned_words
def log_tokenization(positive_prompt: Blend | FlattenedPrompt, def log_tokenization(positive_prompt: Union[Blend, FlattenedPrompt],
negative_prompt: Blend | FlattenedPrompt, negative_prompt: Union[Blend, FlattenedPrompt],
tokenizer): tokenizer):
print(f"\n>> [TOKENLOG] Parsed Prompt: {positive_prompt}") print(f"\n>> [TOKENLOG] Parsed Prompt: {positive_prompt}")
print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {negative_prompt}") print(f"\n>> [TOKENLOG] Parsed Negative Prompt: {negative_prompt}")
@ -139,7 +139,7 @@ def log_tokenization(positive_prompt: Blend | FlattenedPrompt,
log_tokenization_for_prompt_object(negative_prompt, tokenizer, display_label_prefix="(negative prompt)") log_tokenization_for_prompt_object(negative_prompt, tokenizer, display_label_prefix="(negative prompt)")
def log_tokenization_for_prompt_object(p: Blend | FlattenedPrompt, tokenizer, display_label_prefix=None): def log_tokenization_for_prompt_object(p: Union[Blend, FlattenedPrompt], tokenizer, display_label_prefix=None):
display_label_prefix = display_label_prefix or "" display_label_prefix = display_label_prefix or ""
if type(p) is Blend: if type(p) is Blend:
blend: Blend = p blend: Blend = p

View File

@ -191,14 +191,18 @@ def download_bert():
# --------------------------------------------- # ---------------------------------------------
def download_clip(): def download_sd1_clip():
print("Installing CLIP model...", file=sys.stderr) print("Installing SD1 clip model...", file=sys.stderr)
version = "openai/clip-vit-large-patch14" version = "openai/clip-vit-large-patch14"
print("Tokenizer...", file=sys.stderr)
download_from_hf(CLIPTokenizer, version) download_from_hf(CLIPTokenizer, version)
print("Text model...", file=sys.stderr)
download_from_hf(CLIPTextModel, version) download_from_hf(CLIPTextModel, version)
# ---------------------------------------------
def download_sd2_clip():
version = 'stabilityai/stable-diffusion-2'
print("Installing SD2 clip model...", file=sys.stderr)
download_from_hf(CLIPTokenizer, version, subfolder='tokenizer')
download_from_hf(CLIPTextModel, version, subfolder='text_encoder')
# --------------------------------------------- # ---------------------------------------------
def download_realesrgan(): def download_realesrgan():
@ -708,8 +712,8 @@ def write_opts(opts: Namespace, init_file: Path):
out_file.write(line + "\n") out_file.write(line + "\n")
out_file.write( out_file.write(
f""" f"""
--outdir={opts.outdir} --outdir="{opts.outdir}"
--embedding_path={opts.embedding_path} --embedding_path="{opts.embedding_path}"
--precision={opts.precision} --precision={opts.precision}
--max_loaded_models={int(opts.max_loaded_models)} --max_loaded_models={int(opts.max_loaded_models)}
--{'no-' if not opts.safety_checker else ''}nsfw_checker --{'no-' if not opts.safety_checker else ''}nsfw_checker
@ -832,7 +836,8 @@ def main():
else: else:
print("\n** DOWNLOADING SUPPORT MODELS **") print("\n** DOWNLOADING SUPPORT MODELS **")
download_bert() download_bert()
download_clip() download_sd1_clip()
download_sd2_clip()
download_realesrgan() download_realesrgan()
download_gfpgan() download_gfpgan()
download_codeformer() download_codeformer()
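
download_from_hf is an InvokeAI-internal helper, but the subfolder argument it now forwards is the standard transformers one: stabilityai/stable-diffusion-2 ships its tokenizer and text encoder as subfolders of a single repo rather than as standalone models, which is why SD-2 needs its own download step. A hedged equivalent using transformers directly (cache_dir omitted; InvokeAI routes that through its own cache location):

from transformers import CLIPTextModel, CLIPTokenizer

version = "stabilityai/stable-diffusion-2"
# each component lives in its own subfolder of the repo
tokenizer = CLIPTokenizer.from_pretrained(version, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(version, subfolder="text_encoder")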

View File

@ -114,37 +114,37 @@ class addModelsForm(npyscreen.FormMultiPage):
relx=4, relx=4,
) )
self.nextrely += 1 self.nextrely += 1
self.add_widget_intelligent( if len(self.starter_model_list) > 0:
CenteredTitleText, self.add_widget_intelligent(
name="== STARTER MODELS (recommended ones selected) ==", CenteredTitleText,
editable=False, name="== STARTER MODELS (recommended ones selected) ==",
color="CONTROL", editable=False,
) color="CONTROL",
self.nextrely -= 1 )
self.add_widget_intelligent( self.nextrely -= 1
CenteredTitleText, self.add_widget_intelligent(
name="Select from a starter set of Stable Diffusion models from HuggingFace:", CenteredTitleText,
editable=False, name="Select from a starter set of Stable Diffusion models from HuggingFace.",
labelColor="CAUTION", editable=False,
) labelColor="CAUTION",
)
self.nextrely -= 1 self.nextrely -= 1
# if user has already installed some initial models, then don't patronize them # if user has already installed some initial models, then don't patronize them
# by showing more recommendations # by showing more recommendations
show_recommended = not self.existing_models show_recommended = not self.existing_models
self.models_selected = self.add_widget_intelligent( self.models_selected = self.add_widget_intelligent(
npyscreen.MultiSelect, npyscreen.MultiSelect,
name="Install Starter Models", name="Install Starter Models",
values=starter_model_labels, values=starter_model_labels,
value=[ value=[
self.starter_model_list.index(x) self.starter_model_list.index(x)
for x in self.starter_model_list for x in self.starter_model_list
if show_recommended and x in recommended_models if show_recommended and x in recommended_models
], ],
max_height=len(starter_model_labels) + 1, max_height=len(starter_model_labels) + 1,
relx=4, relx=4,
scroll_exit=True, scroll_exit=True,
) )
self.add_widget_intelligent( self.add_widget_intelligent(
CenteredTitleText, CenteredTitleText,
name='== IMPORT LOCAL AND REMOTE MODELS ==', name='== IMPORT LOCAL AND REMOTE MODELS ==',
@ -166,7 +166,11 @@ class addModelsForm(npyscreen.FormMultiPage):
) )
self.nextrely -= 1 self.nextrely -= 1
self.import_model_paths = self.add_widget_intelligent( self.import_model_paths = self.add_widget_intelligent(
TextBox, max_height=5, scroll_exit=True, editable=True, relx=4 TextBox,
max_height=7,
scroll_exit=True,
editable=True,
relx=4
) )
self.nextrely += 1 self.nextrely += 1
self.show_directory_fields = self.add_widget_intelligent( self.show_directory_fields = self.add_widget_intelligent(
@ -241,7 +245,8 @@ class addModelsForm(npyscreen.FormMultiPage):
def resize(self): def resize(self):
super().resize() super().resize()
self.models_selected.values = self._get_starter_model_labels() if hasattr(self,'models_selected'):
self.models_selected.values = self._get_starter_model_labels()
def _clear_scan_directory(self): def _clear_scan_directory(self):
if not self.show_directory_fields.value: if not self.show_directory_fields.value:
@ -320,11 +325,14 @@ class addModelsForm(npyscreen.FormMultiPage):
selections = self.parentApp.user_selections selections = self.parentApp.user_selections
# starter models to install/remove # starter models to install/remove
starter_models = dict( if hasattr(self,'models_selected'):
map( starter_models = dict(
lambda x: (self.starter_model_list[x], True), self.models_selected.value map(
lambda x: (self.starter_model_list[x], True), self.models_selected.value
)
) )
) else:
starter_models = dict()
selections.purge_deleted_models = False selections.purge_deleted_models = False
if hasattr(self, "previously_installed_models"): if hasattr(self, "previously_installed_models"):
unchecked = [ unchecked = [
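
The dict(map(lambda ...)) construction survives from the old code; under the new hasattr guard it reads more naturally as a comprehension. A minimal sketch under the same assumptions (the widget only exists when starter models were shown, and .value holds indices into starter_model_list):

def selected_starter_models(form) -> dict:
    # form.models_selected.value is a list of indices into form.starter_model_list
    if not hasattr(form, "models_selected"):
        return {}
    return {form.starter_model_list[i]: True for i in form.models_selected.value}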

View File

@ -126,7 +126,7 @@ def install_requested_models(
while line := input.readline(): while line := input.readline():
if not line.startswith(argument): if not line.startswith(argument):
output.writelines([line]) output.writelines([line])
output.writelines([f'{argument} {directory}']) output.writelines([f'{argument} "{directory}"'])
os.replace(replacement,initfile) os.replace(replacement,initfile)
# ------------------------------------- # -------------------------------------

View File

@ -137,17 +137,9 @@ class Generator:
Given samples returned from a sampler, converts Given samples returned from a sampler, converts
it into a PIL Image it into a PIL Image
""" """
x_samples = self.model.decode_first_stage(samples) with torch.inference_mode():
x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) image = self.model.decode_latents(samples)
if len(x_samples) != 1: return self.model.numpy_to_pil(image)[0]
raise Exception(
f'>> expected to get a single image, but got {len(x_samples)}')
x_sample = 255.0 * rearrange(
x_samples[0].cpu().numpy(), 'c h w -> h w c'
)
return Image.fromarray(x_sample.astype(np.uint8))
# write an approximate RGB image from latent samples for a single step to PNG
def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image: def repaste_and_color_correct(self, result: Image.Image, init_image: Image.Image, init_mask: Image.Image, mask_blur_radius: int = 8) -> Image.Image:
if init_image is None or init_mask is None: if init_image is None or init_mask is None:
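
The block that was removed is the classic manual VAE decode: denormalize from [-1, 1], clamp, scale to bytes, and transpose CHW to HWC. The diffusers pipeline bundles exactly those steps into decode_latents() and numpy_to_pil(), which is why the new body is three lines. For reference, the removed logic reconstructed as a standalone function (signatures as in the original code):

import numpy as np
import torch
from PIL import Image
from einops import rearrange

def sample_to_image_manual(model, samples: torch.Tensor) -> Image.Image:
    x = model.decode_first_stage(samples)  # latents -> image tensor in [-1, 1]
    x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)
    if len(x) != 1:
        raise Exception(f">> expected to get a single image, but got {len(x)}")
    arr = 255.0 * rearrange(x[0].cpu().numpy(), "c h w -> h w c")
    return Image.fromarray(arr.astype(np.uint8))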

View File

@ -624,7 +624,7 @@ class ModelManager(object):
self, self,
repo_or_path: Union[str, Path], repo_or_path: Union[str, Path],
model_name: str = None, model_name: str = None,
model_description: str = None, description: str = None,
vae: dict = None, vae: dict = None,
commit_to_conf: Path = None, commit_to_conf: Path = None,
) -> bool: ) -> bool:
@ -640,7 +640,7 @@ class ModelManager(object):
models.yaml file. models.yaml file.
""" """
model_name = model_name or Path(repo_or_path).stem model_name = model_name or Path(repo_or_path).stem
model_description = model_description or f"Imported diffusers model {model_name}" model_description = description or f"Imported diffusers model {model_name}"
new_config = dict( new_config = dict(
description=model_description, description=model_description,
vae=vae, vae=vae,
@ -725,7 +725,7 @@ class ModelManager(object):
SDLegacyType.V1 SDLegacyType.V1
SDLegacyType.V1_INPAINT SDLegacyType.V1_INPAINT
SDLegacyType.V2 SDLegacyType.V2
UNKNOWN SDLegacyType.UNKNOWN
""" """
key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
@ -785,7 +785,7 @@ class ModelManager(object):
print(f">> Probing {thing} for import") print(f">> Probing {thing} for import")
if thing.startswith(("http:", "https:", "ftp:")): if thing.startswith(("http:", "https:", "ftp:")):
print(f" | {thing} appears to be a URL") print(f" | {thing} appears to be a URL")
model_path = self._resolve_path( model_path = self._resolve_path(
thing, "models/ldm/stable-diffusion-v1" thing, "models/ldm/stable-diffusion-v1"
) # _resolve_path does a download if needed ) # _resolve_path does a download if needed
@ -793,15 +793,15 @@ class ModelManager(object):
elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")): elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
if Path(thing).stem in ["model", "diffusion_pytorch_model"]: if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
print( print(
f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import" f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
) )
return return
else: else:
print(f" | {thing} appears to be a checkpoint file on disk") print(f" | {thing} appears to be a checkpoint file on disk")
model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1") model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")
elif Path(thing).is_dir() and Path(thing, "model_index.json").exists(): elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
print(f" | {thing} appears to be a diffusers file on disk") print(f" | {thing} appears to be a diffusers file on disk")
model_name = self.import_diffuser_model( model_name = self.import_diffuser_model(
thing, thing,
vae=dict(repo_id="stabilityai/sd-vae-ft-mse"), vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
@ -812,13 +812,13 @@ class ModelManager(object):
elif Path(thing).is_dir(): elif Path(thing).is_dir():
if (Path(thing) / "model_index.json").exists(): if (Path(thing) / "model_index.json").exists():
print(f">> {thing} appears to be a diffusers model.") print(f" | {thing} appears to be a diffusers model.")
model_name = self.import_diffuser_model( model_name = self.import_diffuser_model(
thing, commit_to_conf=commit_to_conf thing, commit_to_conf=commit_to_conf
) )
else: else:
print( print(
f">> {thing} appears to be a directory. Will scan for models to import" f" |{thing} appears to be a directory. Will scan for models to import"
) )
for m in list(Path(thing).rglob("*.ckpt")) + list( for m in list(Path(thing).rglob("*.ckpt")) + list(
Path(thing).rglob("*.safetensors") Path(thing).rglob("*.safetensors")
@ -830,7 +830,7 @@ class ModelManager(object):
return model_name return model_name
elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing): elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing):
print(f" | {thing} appears to be a HuggingFace diffusers repo_id") print(f" | {thing} appears to be a HuggingFace diffusers repo_id")
model_name = self.import_diffuser_model( model_name = self.import_diffuser_model(
thing, commit_to_conf=commit_to_conf thing, commit_to_conf=commit_to_conf
) )
@ -847,7 +847,7 @@ class ModelManager(object):
return return
if model_path.stem in self.config: # already imported if model_path.stem in self.config: # already imported
print(" | Already imported. Skipping") print(" | Already imported. Skipping")
return return
# another round of heuristics to guess the correct config file. # another round of heuristics to guess the correct config file.
@ -860,18 +860,18 @@ class ModelManager(object):
model_config_file = None model_config_file = None
if model_type == SDLegacyType.V1: if model_type == SDLegacyType.V1:
print(" | SD-v1 model detected") print(" | SD-v1 model detected")
model_config_file = Path( model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inference.yaml" Globals.root, "configs/stable-diffusion/v1-inference.yaml"
) )
elif model_type == SDLegacyType.V1_INPAINT: elif model_type == SDLegacyType.V1_INPAINT:
print(" | SD-v1 inpainting model detected") print(" | SD-v1 inpainting model detected")
model_config_file = Path( model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml" Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
) )
elif model_type == SDLegacyType.V2: elif model_type == SDLegacyType.V2:
print( print(
" | SD-v2 model detected; model will be converted to diffusers format" " | SD-v2 model detected; model will be converted to diffusers format"
) )
model_config_file = Path( model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml" Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
@ -923,7 +923,7 @@ class ModelManager(object):
vae=None, vae=None,
original_config_file: Path = None, original_config_file: Path = None,
commit_to_conf: Path = None, commit_to_conf: Path = None,
) -> dict: ) -> str:
""" """
Convert a legacy ckpt weights file to diffuser model and import Convert a legacy ckpt weights file to diffuser model and import
into models.yaml. into models.yaml.
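
probe_model_type, referenced throughout these hunks, keys off tensor shapes instead of file names, which is what let the converter drop the brittle "inpaint in the filename" check. A sketch of the kind of heuristic involved; the attention-key test appears verbatim in this diff, while the input-conv check is an assumption about the surrounding code (v1 inpainting UNets take 9 input channels: 4 latent + 4 masked-image latent + 1 mask):

from enum import Enum

class SDLegacyType(Enum):
    V1 = 1
    V1_INPAINT = 2
    V2 = 3
    UNKNOWN = 99

ATTN_KEY = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
IN_KEY = "model.diffusion_model.input_blocks.0.0.weight"  # assumption, not shown in this diff

def probe_model_type(checkpoint: dict) -> SDLegacyType:
    if ATTN_KEY in checkpoint and checkpoint[ATTN_KEY].shape[-1] == 1024:
        return SDLegacyType.V2  # SD-2.x uses a 1024-dim cross-attention context
    in_conv = checkpoint.get(IN_KEY)
    if in_conv is not None:
        if in_conv.shape[1] == 9:
            return SDLegacyType.V1_INPAINT
        if in_conv.shape[1] == 4:
            return SDLegacyType.V1
    return SDLegacyType.UNKNOWN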

View File

@ -2,14 +2,14 @@
# General # General
site_name: InvokeAI Stable Diffusion Toolkit Docs site_name: InvokeAI Stable Diffusion Toolkit Docs
site_url: https://invoke-ai.github.io/InvokeAI site_url: !ENV [SITE_URL, 'https://invoke-ai.github.io/InvokeAI']
site_author: mauwii site_author: mauwii
dev_addr: '127.0.0.1:8080' dev_addr: '127.0.0.1:8080'
# Repository # Repository
repo_name: 'invoke-ai/InvokeAI' repo_name: !ENV [REPO_NAME, 'invoke-ai/InvokeAI']
repo_url: 'https://github.com/invoke-ai/InvokeAI' repo_url: !ENV [REPO_URL, 'https://github.com/invoke-ai/InvokeAI']
edit_uri: edit/main/docs/ edit_uri: blob/main/docs/
# Copyright # Copyright
copyright: Copyright &copy; 2022 InvokeAI Team copyright: Copyright &copy; 2022 InvokeAI Team
@ -19,7 +19,8 @@ theme:
name: material name: material
icon: icon:
repo: fontawesome/brands/github repo: fontawesome/brands/github
edit: material/file-document-edit-outline edit: material/pencil
view: material/eye
palette: palette:
- media: '(prefers-color-scheme: light)' - media: '(prefers-color-scheme: light)'
scheme: default scheme: default
@ -33,6 +34,11 @@ theme:
icon: material/lightbulb-outline icon: material/lightbulb-outline
name: Switch to light mode name: Switch to light mode
features: features:
- content.action.edit
- content.action.view
- content.code.copy
- content.tabs.link
- navigation.indexes
- navigation.instant - navigation.instant
- navigation.tabs - navigation.tabs
- navigation.top - navigation.top
@ -89,9 +95,9 @@ plugins:
enable_creation_date: true enable_creation_date: true
- redirects: - redirects:
redirect_maps: redirect_maps:
'installation/INSTALL_AUTOMATED.md': 'installation/010_INSTALL_AUTOMATED.md' 'installation/INSTALL_AUTOMATED.md': 'installation/010_INSTALL_AUTOMATED.md'
'installation/INSTALL_MANUAL.md': 'installation/020_INSTALL_MANUAL.md' 'installation/INSTALL_MANUAL.md': 'installation/020_INSTALL_MANUAL.md'
'installation/INSTALL_SOURCE.md': 'installation/020_INSTALL_MANUAL.md' 'installation/INSTALL_SOURCE.md': 'installation/020_INSTALL_MANUAL.md'
'installation/INSTALL_DOCKER.md': 'installation/040_INSTALL_DOCKER.md' 'installation/INSTALL_DOCKER.md': 'installation/040_INSTALL_DOCKER.md'
'installation/INSTALLING_MODELS.md': 'installation/050_INSTALLING_MODELS.md' 'installation/INSTALLING_MODELS.md': 'installation/050_INSTALLING_MODELS.md'
'installation/INSTALL_PATCHMATCH.md': 'installation/060_INSTALL_PATCHMATCH.md' 'installation/INSTALL_PATCHMATCH.md': 'installation/060_INSTALL_PATCHMATCH.md'

View File

@ -1,43 +1,37 @@
[build-system] [build-system]
requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
build-backend = "setuptools.build_meta" build-backend = "setuptools.build_meta"
requires = ["setuptools ~= 67.1", "wheel"]
[project] [project]
name = "InvokeAI" authors = [{name = "The InvokeAI Project", email = "lincoln.stein@gmail.com"}]
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.9, <3.11"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [ classifiers = [
'Development Status :: 4 - Beta', "Development Status :: 4 - Beta",
'Environment :: GPU', "Environment :: GPU :: NVIDIA CUDA",
'Environment :: GPU :: NVIDIA CUDA', "Environment :: GPU",
'Environment :: MacOS X', "Environment :: MacOS X",
'Intended Audience :: End Users/Desktop', "Intended Audience :: Developers",
'Intended Audience :: Developers', "Intended Audience :: End Users/Desktop",
'License :: OSI Approved :: MIT License', "License :: OSI Approved :: MIT License",
'Operating System :: POSIX :: Linux', "Operating System :: MacOS",
'Operating System :: MacOS', "Operating System :: Microsoft :: Windows",
'Operating System :: Microsoft :: Windows', "Operating System :: POSIX :: Linux",
'Programming Language :: Python :: 3 :: Only', "Programming Language :: Python :: 3 :: Only",
'Programming Language :: Python :: 3.8', "Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.9', "Programming Language :: Python :: 3.10",
'Programming Language :: Python :: 3.10', "Programming Language :: Python :: 3.9",
'Topic :: Artistic Software', "Programming Language :: Python",
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application', "Topic :: Artistic Software",
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
'Topic :: Multimedia :: Graphics', "Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
'Topic :: Scientific/Engineering :: Artificial Intelligence', "Topic :: Multimedia :: Graphics",
'Topic :: Scientific/Engineering :: Image Processing', "Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Processing",
] ]
dependencies = [ dependencies = [
"accelerate", "accelerate",
"albumentations", "albumentations",
"click", "click",
"clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "clip_anytorch",
"compel==0.1.7", "compel==0.1.7",
"datasets", "datasets",
"diffusers[torch]~=0.13", "diffusers[torch]~=0.13",
@ -54,7 +48,7 @@ dependencies = [
"huggingface-hub>=0.11.1", "huggingface-hub>=0.11.1",
"imageio", "imageio",
"imageio-ffmpeg", "imageio-ffmpeg",
"k-diffusion", # replacing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip", "k-diffusion",
"kornia", "kornia",
"npyscreen", "npyscreen",
"numpy<1.24", "numpy<1.24",
@ -62,8 +56,8 @@ dependencies = [
"opencv-python", "opencv-python",
"picklescan", "picklescan",
"pillow", "pillow",
"pudb",
"prompt-toolkit", "prompt-toolkit",
"pudb",
"pypatchmatch", "pypatchmatch",
"pyreadline3", "pyreadline3",
"pytorch-lightning==1.7.7", "pytorch-lightning==1.7.7",
@ -75,62 +69,116 @@ dependencies = [
"streamlit", "streamlit",
"taming-transformers-rom1504", "taming-transformers-rom1504",
"test-tube>=0.7.5", "test-tube>=0.7.5",
"torch>=1.13.1",
"torch-fidelity", "torch-fidelity",
"torchvision>=0.14.1", "torch>=1.13.1",
"torchmetrics", "torchmetrics",
"torchvision>=0.14.1",
"transformers~=4.25", "transformers~=4.25",
"windows-curses; sys_platform=='win32'", "windows-curses; sys_platform=='win32'",
] ]
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
dynamic = ["version"]
keywords = ["AI", "stable-diffusion"]
license = {text = "MIT"}
name = "InvokeAI"
readme = {content-type = "text/markdown", file = "README.md"}
requires-python = ">=3.9, <3.11"
[project.optional-dependencies] [project.optional-dependencies]
"dev" = [
"black[jupyter]",
"flake8",
"flake8-black",
"flake8-bugbear",
"isort",
"pre-commit",
]
"dist" = ["pip-tools", "pipdeptree", "twine"] "dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [ "docs" = [
"mkdocs-material<9.0",
"mkdocs-git-revision-date-localized-plugin", "mkdocs-git-revision-date-localized-plugin",
"mkdocs-material==9.*",
"mkdocs-redirects==1.2.0", "mkdocs-redirects==1.2.0",
] ]
"test" = ["pytest>6.0.0", "pytest-cov"] "test" = ["pytest-cov", "pytest>6.0.0"]
"xformers" = [ "xformers" = [
"xformers~=0.0.16; sys_platform!='darwin'", "triton; sys_platform=='linux'",
"triton; sys_platform=='linux'", "xformers~=0.0.16; sys_platform!='darwin'",
] ]
[project.scripts] [project.scripts]
# legacy entrypoints; provided for backwards compatibility # legacy entrypoints; provided for backwards compatibility
"invoke.py" = "ldm.invoke.CLI:main"
"configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main" "configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main" "invoke.py" = "ldm.invoke.CLI:main"
"merge_embeddings.py" = "ldm.invoke.merge_diffusers:main" "merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
# modern entrypoints # modern entrypoints
"invokeai" = "ldm.invoke.CLI:main" "invokeai" = "ldm.invoke.CLI:main"
"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main" "invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging "invokeai-merge" = "ldm.invoke.merge_diffusers:main"
"invokeai-ti" = "ldm.invoke.training.textual_inversion:main" "invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
"invokeai-model-install" = "ldm.invoke.config.model_install:main"
"invokeai-update" = "ldm.invoke.config.invokeai_update:main"
[project.urls] [project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues" "Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy" "Discord" = "https://discord.gg/ZmtBAhwWhy"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
[tool.setuptools]
license-files = ["LICENSE"]
[tool.setuptools.dynamic] [tool.setuptools.dynamic]
version = { attr = "ldm.invoke.__version__" } version = {attr = "ldm.invoke.__version__"}
[tool.setuptools.packages.find] [tool.setuptools.packages.find]
"include" = [
"invokeai.assets.web",
"invokeai.backend*",
"invokeai.configs*",
"invokeai.frontend.dist*",
"ldm*",
]
"where" = ["."] "where" = ["."]
"include" = ["invokeai.assets.web*", "invokeai.backend*", "invokeai.frontend.dist*", "invokeai.configs*", "ldm*"]
[tool.setuptools.package-data] [tool.setuptools.package-data]
"invokeai.assets.web" = ["**.png"] "invokeai.assets.web" = ["**.png"]
"invokeai.backend" = ["**.png"] "invokeai.configs" = ["**.example", "**.txt", "**.yaml"]
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.dist" = ["**"] "invokeai.frontend.dist" = ["**"]
[tool.black]
extend-exclude = '''
/(
# skip legacy scripts
| scripts/orig_scripts
)/
'''
line-length = 88
target-version = ['py39']
[tool.isort]
atomic = true
extend_skip_glob = ["scripts/orig_scripts/*"]
filter_files = true
line_length = 120
profile = "black"
py_version = 39
remove_redundant_aliases = true
skip_gitignore = true
src_paths = ["installer", "invokeai", "ldm", "tests"]
virtual_env = ".venv"
[tool.coverage.run]
branch = true
parallel = true
[tool.coverage.report]
skip_covered = true
skip_empty = true
[tool.coverage.paths]
source = ["invokeai/backend", "ldm/invoke"]
[tool.pytest.ini_options] [tool.pytest.ini_options]
addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch" addopts = ["--cov=invokeai/backend", "--cov=ldm/invoke"]
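
The [tool.setuptools.dynamic] table ties the package version to the attribute edited earlier in this commit, so the __version__ bump to 2.3.1.post2 is the single source of truth. A quick check after installation:

# the version pip reports comes straight from ldm/invoke/__init__.py
from ldm.invoke import __version__

print(__version__)  # "2.3.1.post2" for this commit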

View File

@ -0,0 +1,23 @@
#!/usr/bin/env python
'''
This script is used at release time to generate a markdown table describing the
starter models. This text is then manually copied into 050_INSTALLING_MODELS.md.
'''
from omegaconf import OmegaConf
from pathlib import Path
def main():
initial_models_file = Path(__file__).parent / '../invokeai/configs/INITIAL_MODELS.yaml'
models = OmegaConf.load(initial_models_file)
print('|Model Name | HuggingFace Repo ID | Description | URL |')
print('|---------- | ---------- | ----------- | --- |')
for model in models:
repo_id = models[model].repo_id
url = f'https://huggingface.co/{repo_id}'
print(f'|{model}|{repo_id}|{models[model].description}|{url} |')
if __name__ == '__main__':
main()