Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

Compare commits: test/node-... to v2.3.3-rc1 (160 commits)
Commits (SHA1; author and date columns omitted):
1cb88960fe
610a1483b7
b4e7fc0d1d
b792b7d68c
abaa91195d
1806bfb755
7377855c02
5f2a6f24cf
5b8b92d957
352202a7bc
82144de85f
b70d713e89
e39dde4140
c151541703
29b348ece1
9f7c86c33e
a79d40519c
4515d52a42
2a8513eee0
b856fac713
4a3951681c
ba89444e36
a044403ac3
16dea46b79
1f80b5335b
eee7f13771
6db509a4ff
b7965e1ee6
c3d292e8f9
206593ec99
1b62c781d7
c4de509983
8d80802a35
694925f427
61d5cb2536
c23fe4f6d2
e6e93bbb80
b5bd5240b6
827ac82d54
9c2f3259ca
6abe2bfe42
acf955fc7b
023db8ac41
65cf733a0c
8323169864
bf5cd1bd3b
c9db01e272
6d5e9161fb
0636348585
4c44523ba0
5372800e60
2ae396640b
252f222068
142ba8c8ea
84dfd2003e
5a633ba811
f207647f0f
ad16581ab8
fd722ddf7d
d669e69755
d912bab4c2
68c2722c02
426fea9681
62cfdb9f11
46b4d6497c
757c0a5775
9c8f0b44ad
21433a948c
183344b878
fc164d5be2
45aa770cd1
6d0e782d71
117f70e1ec
c840bd8c12
3c64fad379
bc813e4065
7c1d2422f0
a5b11e1071
c7e4daf431
4c61f3a514
2a179799d8
650f4bb58c
7b92b27ceb
8f1b301d01
e3a19d4f3e
70283f7d8d
ecbb385447
8dc56471ef
282ba201d2
2394f6458f
47c1be3322
741464b053
3aab5e7e20
1e7a6dc676
81fd2ee8c1
357601e2d6
71ff759692
b0657d5fde
fa391c0b78
6082aace6d
7ef63161ba
b731b55de4
51956ba356
f494077003
317165c410
f5aadbc200
774230f7b9
72e25d99c7
7c7c1ba02d
9c6af74556
57daa3e1c2
ce98fdc5c4
f901645c12
f514f17e92
8744dd0c46
f3d669319e
ace7032067
d32819875a
5b5898827c
8a233174de
bec81170b5
2f25363d76
2aa5688d90
ed06a70eca
e80160f8dd
bfe64b1510
bb1769abab
e3f906e90d
d77dc68119
ee3d695e2e
0443befd2f
b4fd02b910
4e0fe4ad6e
3231499992
c134161a45
c3f533f20f
519a9071a8
87b4663026
6c11e8ee06
2a739890a3
02e84c9565
39715017f9
35518542f8
0aa1106c96
33f832e6ab
281c788489
3858bef185
f9a1afd09c
251e9c0294
d8bf2e3c10
218f30b7d0
da983c7773
7012e16c43
b1050abf7f
210998081a
604acb9d91
5beeb1a897
de6304b729
d0be79c33d
c22326f9f8
.editorconfig (modified)

@@ -1,5 +1,8 @@
+root = true
+
 # All files
 [*]
+max_line_length = 80
 charset = utf-8
 end_of_line = lf
 indent_size = 2

@@ -10,3 +13,18 @@ trim_trailing_whitespace = true
 # Python
 [*.py]
 indent_size = 4
+max_line_length = 120
+
+# css
+[*.css]
+indent_size = 4
+
+# flake8
+[.flake8]
+indent_size = 4
+
+# Markdown MkDocs
+[docs/**/*.md]
+max_line_length = 80
+indent_size = 4
+indent_style = unset
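Most editors apply these EditorConfig rules automatically. If you want a command-line check, one option is the third-party editorconfig-checker tool; this is only a sketch and assumes you are willing to use that npm package, which is not part of this repository:

```sh
# Hypothetical compliance check using the third-party editorconfig-checker package
npx editorconfig-checker docs ldm
```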
.flake8 (new file, 37 lines)

@@ -0,0 +1,37 @@
+[flake8]
+max-line-length = 120
+extend-ignore =
+    # See https://github.com/PyCQA/pycodestyle/issues/373
+    E203,
+    # use Bugbear's B950 instead
+    E501,
+    # from black repo https://github.com/psf/black/blob/main/.flake8
+    E266, W503, B907
+extend-select =
+    # Bugbear line length
+    B950
+extend-exclude =
+    scripts/orig_scripts/*
+    ldm/models/*
+    ldm/modules/*
+    ldm/data/*
+    ldm/generate.py
+    ldm/util.py
+    ldm/simplet2i.py
+per-file-ignores =
+    # B950 line too long
+    # W605 invalid escape sequence
+    # F841 assigned to but never used
+    # F401 imported but unused
+    tests/test_prompt_parser.py: B950, W605, F401
+    tests/test_textual_inversion.py: F841, B950
+    # B023 Function definition does not bind loop variable
+    scripts/legacy_api.py: F401, B950, B023, F841
+    ldm/invoke/__init__.py: F401
+    # B010 Do not call setattr with a constant attribute value
+    ldm/invoke/server_legacy.py: B010
+# =====================
+# flake-quote settings:
+# =====================
+# Set this to match black style:
+inline-quotes = double
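With this file in place, flake8 picks the configuration up automatically when run from the repository root. A minimal sketch of a local lint run, assuming flake8 and the plugins named above are installed:

```sh
pip install flake8 flake8-bugbear flake8-comprehensions flake8-simplify
flake8 ldm/invoke/CLI.py   # lint one file; excludes and ignore lists come from .flake8
```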
.github/CODEOWNERS (modified, 50 lines changed)

@@ -2,50 +2,60 @@
 /.github/workflows/ @mauwii @lstein @blessedcoolant
 
 # documentation
-/docs/ @lstein @mauwii @tildebyte @blessedcoolant
+/docs/ @lstein @mauwii @blessedcoolant
-mkdocs.yml @lstein @mauwii @blessedcoolant
+mkdocs.yml @mauwii @lstein
 
 # installation and configuration
-/pyproject.toml @mauwii @lstein @ebr @blessedcoolant
+/pyproject.toml @mauwii @lstein @ebr
-/docker/ @mauwii @lstein @blessedcoolant
+/docker/ @mauwii
 /scripts/ @ebr @lstein @blessedcoolant
-/installer/ @ebr @lstein @tildebyte @blessedcoolant
+/installer/ @ebr @lstein
-ldm/invoke/config @lstein @ebr @blessedcoolant
+ldm/invoke/config @lstein @ebr
-invokeai/assets @lstein @ebr @blessedcoolant
+invokeai/assets @lstein @blessedcoolant
 invokeai/configs @lstein @ebr @blessedcoolant
 /ldm/invoke/_version.py @lstein @blessedcoolant
 
 # web ui
-/invokeai/frontend @blessedcoolant @psychedelicious @lstein
+/invokeai/frontend @blessedcoolant @psychedelicious
-/invokeai/backend @blessedcoolant @psychedelicious @lstein
+/invokeai/backend @blessedcoolant @psychedelicious
 
 # generation and model management
 /ldm/*.py @lstein @blessedcoolant
-/ldm/generate.py @lstein @keturn @blessedcoolant
+/ldm/generate.py @lstein @keturn
 /ldm/invoke/args.py @lstein @blessedcoolant
 /ldm/invoke/ckpt* @lstein @blessedcoolant
 /ldm/invoke/ckpt_generator @lstein @blessedcoolant
 /ldm/invoke/CLI.py @lstein @blessedcoolant
 /ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
-/ldm/invoke/generator @keturn @damian0815 @blessedcoolant
+/ldm/invoke/generator @keturn @damian0815
 /ldm/invoke/globals.py @lstein @blessedcoolant
 /ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
 /ldm/invoke/model_manager.py @lstein @blessedcoolant
 /ldm/invoke/txt2mask.py @lstein @blessedcoolant
-/ldm/invoke/patchmatch.py @Kyle0654 @blessedcoolant @lstein
+/ldm/invoke/patchmatch.py @Kyle0654 @lstein
 /ldm/invoke/restoration @lstein @blessedcoolant
 
 # attention, textual inversion, model configuration
-/ldm/models @damian0815 @keturn @lstein @blessedcoolant
+/ldm/models @damian0815 @keturn @blessedcoolant
-/ldm/modules @damian0815 @keturn @lstein @blessedcoolant
+/ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
+/ldm/modules/attention.py @damian0815 @keturn
+/ldm/modules/diffusionmodules @damian0815 @keturn
+/ldm/modules/distributions @damian0815 @keturn
+/ldm/modules/ema.py @damian0815 @keturn
+/ldm/modules/embedding_manager.py @lstein
+/ldm/modules/encoders @damian0815 @keturn
+/ldm/modules/image_degradation @damian0815 @keturn
+/ldm/modules/losses @damian0815 @keturn
+/ldm/modules/x_transformer.py @damian0815 @keturn
 
 # Nodes
-apps/ @Kyle0654 @lstein @blessedcoolant
+apps/ @Kyle0654 @jpphoto
 
 # legacy REST API
-# is CapableWeb still engaged?
+# these are dead code
-/ldm/invoke/pngwriter.py @CapableWeb @lstein @blessedcoolant
+#/ldm/invoke/pngwriter.py @CapableWeb
-/ldm/invoke/server_legacy.py @CapableWeb @lstein @blessedcoolant
+#/ldm/invoke/server_legacy.py @CapableWeb
-/scripts/legacy_api.py @CapableWeb @lstein @blessedcoolant
+#/scripts/legacy_api.py @CapableWeb
-/tests/legacy_tests.sh @CapableWeb @lstein @blessedcoolant
+#/tests/legacy_tests.sh @CapableWeb
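After pushing a CODEOWNERS change like this one, GitHub's REST API can report any syntax problems it found in the file; a quick sketch using the gh CLI, with the repository slug assumed:

```sh
# Returns the list of parse errors GitHub found in CODEOWNERS; an empty list means it is clean
gh api repos/invoke-ai/InvokeAI/codeowners/errors
```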
.github/workflows/mkdocs-material.yml (modified, 10 lines changed)

@@ -9,6 +9,10 @@ jobs:
   mkdocs-material:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
+    env:
+      REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
+      REPO_NAME: '${{ github.repository }}'
+      SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
     steps:
       - name: checkout sources
        uses: actions/checkout@v3

@@ -19,11 +23,15 @@ jobs:
         uses: actions/setup-python@v4
         with:
           python-version: '3.10'
+          cache: pip
+          cache-dependency-path: pyproject.toml
 
       - name: install requirements
+        env:
+          PIP_USE_PEP517: 1
         run: |
           python -m \
-            pip install -r docs/requirements-mkdocs.txt
+            pip install ".[docs]"
 
       - name: confirm buildability
         run: |
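The workflow's docs build can be reproduced locally before pushing; a rough equivalent of its install and build steps, assuming a checkout where pyproject.toml defines the `docs` extra:

```sh
pip install --use-pep517 ".[docs]"   # same dependency set the workflow now installs
mkdocs build                         # confirm the site builds, as the CI job does
```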
.pre-commit-config.yaml (new file, 41 lines)

@@ -0,0 +1,41 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+  - repo: https://github.com/psf/black
+    rev: 23.1.0
+    hooks:
+      - id: black
+
+  - repo: https://github.com/pycqa/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+
+  - repo: https://github.com/PyCQA/flake8
+    rev: 6.0.0
+    hooks:
+      - id: flake8
+        additional_dependencies:
+          - flake8-black
+          - flake8-bugbear
+          - flake8-comprehensions
+          - flake8-simplify
+
+  - repo: https://github.com/pre-commit/mirrors-prettier
+    rev: 'v3.0.0-alpha.4'
+    hooks:
+      - id: prettier
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: check-added-large-files
+      - id: check-executables-have-shebangs
+      - id: check-shebang-scripts-are-executable
+      - id: check-merge-conflict
+      - id: check-symlinks
+      - id: check-toml
+      - id: end-of-file-fixer
+      - id: no-commit-to-branch
+        args: ['--branch', 'main']
+      - id: trailing-whitespace
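Once this configuration is committed, the hooks are installed and exercised with the standard pre-commit commands, for example:

```sh
pip install pre-commit
pre-commit install           # register the git hook for future commits
pre-commit run --all-files   # run every configured hook once against the whole tree
```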
.prettierignore (new file, 14 lines)

@@ -0,0 +1,14 @@
+invokeai/frontend/.husky
+invokeai/frontend/patches
+
+# Ignore artifacts:
+build
+coverage
+static
+invokeai/frontend/dist
+
+# Ignore all HTML files:
+*.html
+
+# Ignore deprecated docs
+docs/installation/deprecated_documentation
.prettierrc.yaml (modified)

@@ -1,9 +1,9 @@
-endOfLine: lf
-tabWidth: 2
-useTabs: false
-singleQuote: true
-quoteProps: as-needed
 embeddedLanguageFormatting: auto
+endOfLine: lf
+singleQuote: true
+semi: true
+trailingComma: es5
+useTabs: false
 overrides:
   - files: '*.md'
     options:

@@ -11,3 +11,9 @@ overrides:
       printWidth: 80
       parser: markdown
       cursorOffset: -1
+  - files: docs/**/*.md
+    options:
+      tabWidth: 4
+  - files: 'invokeai/frontend/public/locales/*.json'
+    options:
+      tabWidth: 4
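To see the effect of these options outside the pre-commit hook, Prettier can be invoked directly; a sketch, assuming Node.js is available (the hook above pins Prettier itself, so npx here is only a convenience):

```sh
npx prettier --check "docs/**/*.md"     # list files that differ from the configured style
npx prettier --write .prettierrc.yaml   # rewrite a single file in place
```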
docs/.markdownlint.jsonc (new file, 5 lines)

@@ -0,0 +1,5 @@
+{
+  "MD046": false,
+  "MD007": false,
+  "MD030": false
+}
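A sketch of linting the docs tree against this rule file, assuming the Node-based markdownlint-cli package is acceptable:

```sh
npx markdownlint-cli --config docs/.markdownlint.jsonc "docs/**/*.md"
```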
docs/features/TEXTUAL_INVERSION.md (modified)

@@ -154,8 +154,11 @@ training sets will converge with 2000-3000 steps.
 
 This adjusts how many training images are processed simultaneously in
 each step. Higher values will cause the training process to run more
-quickly, but use more memory. The default size will run with GPUs with
-as little as 12 GB.
+quickly, but use more memory. The default size is selected based on
+whether you have the `xformers` memory-efficient attention library
+installed. If `xformers` is available, the batch size will be 8,
+otherwise 3. These values were chosen to allow training to run with
+GPUs with as little as 12 GB VRAM.
 
 ### Learning rate
 
@@ -172,8 +175,10 @@ learning rate to improve performance.
 
 ### Use xformers acceleration
 
-This will activate XFormers memory-efficient attention. You need to
-have XFormers installed for this to have an effect.
+This will activate XFormers memory-efficient attention, which will
+reduce memory requirements by half or more and allow you to select a
+higher batch size. You need to have XFormers installed for this to
+have an effect.
 
 ### Learning rate scheduler
 
@@ -250,6 +255,49 @@ invokeai-ti \
    --only_save_embeds
 ```
+
+## Using Distributed Training
+
+If you have multiple GPUs on one machine, or a cluster of GPU-enabled
+machines, you can activate distributed training. See the [HuggingFace
+Accelerate pages](https://huggingface.co/docs/accelerate/index) for
+full information, but the basic recipe is:
+
+1. Enter the InvokeAI developer's console command line by selecting
+   option [8] from the `invoke.sh`/`invoke.bat` script.
+
+2. Configure Accelerate using `accelerate config`:
+
+   ```sh
+   accelerate config
+   ```
+
+   This will guide you through the configuration process, including
+   specifying how many machines you will run training on and the number
+   of GPUs per machine.
+
+   You only need to do this once.
+
+3. Launch training from the command line using `accelerate launch`. Be sure
+   that your current working directory is the InvokeAI root directory (usually
+   named `invokeai` in your home directory):
+
+   ```sh
+   accelerate launch .venv/bin/invokeai-ti \
+       --model=stable-diffusion-1.5 \
+       --resolution=512 \
+       --learnable_property=object \
+       --initializer_token='*' \
+       --placeholder_token='<shraddha>' \
+       --train_data_dir=/home/lstein/invokeai/text-inversion-training-data/shraddha \
+       --output_dir=/home/lstein/invokeai/text-inversion-training/shraddha \
+       --scale_lr \
+       --train_batch_size=10 \
+       --gradient_accumulation_steps=4 \
+       --max_train_steps=2000 \
+       --learning_rate=0.0005 \
+       --lr_scheduler=constant \
+       --mixed_precision=fp16 \
+       --only_save_embeds
+   ```
 
 ## Using Embeddings
 
 After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.
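The batch-size default described in that hunk is easy to probe on your own machine; a small sketch that only tests whether the `xformers` package imports (the 8/3 values mirror the documentation text above):

```sh
# Probe for xformers; per the docs, training defaults to batch size 8 with it, 3 without
python -c "import xformers" 2>/dev/null \
  && echo "xformers available: training defaults to batch size 8" \
  || echo "xformers not installed: training defaults to batch size 3"
```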
docs/features/index.md (modified)

@@ -2,62 +2,82 @@
 title: Overview
 ---
 
-Here you can find the documentation for InvokeAI's various features.
-
-## The Basics
-### * The [Web User Interface](WEB.md)
-Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
-
-### * The [Unified Canvas](UNIFIED_CANVAS.md)
-Build complex scenes by combine and modifying multiple images in a stepwise
-fashion. This feature combines img2img, inpainting and outpainting in
-a single convenient digital artist-optimized user interface.
-
-### * The [Command Line Interface (CLI)](CLI.md)
-Scriptable access to InvokeAI's features.
-
-## Image Generation
-### * [Prompt Engineering](PROMPTS.md)
-Get the images you want with the InvokeAI prompt engineering language.
-
-## * [Post-Processing](POSTPROCESS.md)
-Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
-
-## * The [Concepts Library](CONCEPTS.md)
-Add custom subjects and styles using HuggingFace's repository of embeddings.
-
-### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
-Use a seed image to build new creations in the CLI.
-
-### * [Inpainting Guide for the CLI](INPAINTING.md)
-Selectively erase and replace portions of an existing image in the CLI.
-
-### * [Outpainting Guide for the CLI](OUTPAINTING.md)
-Extend the borders of the image with an "outcrop" function within the CLI.
-
-### * [Generating Variations](VARIATIONS.md)
-Have an image you like and want to generate many more like it? Variations
-are the ticket.
-
-## Model Management
-
-## * [Model Installation](../installation/050_INSTALLING_MODELS.md)
-Learn how to import third-party models and switch among them. This
-guide also covers optimizing models to load quickly.
-
-## * [Merging Models](MODEL_MERGING.md)
-Teach an old model new tricks. Merge 2-3 models together to create a
-new model that combines characteristics of the originals.
-
-## * [Textual Inversion](TEXTUAL_INVERSION.md)
-Personalize models by adding your own style or subjects.
-
-# Other Features
-
-## * [The NSFW Checker](NSFW.md)
-Prevent InvokeAI from displaying unwanted racy images.
-
-## * [Miscellaneous](OTHER.md)
-Run InvokeAI on Google Colab, generate images with repeating patterns,
-batch process a file of prompts, increase the "creativity" of image
-generation by adding initial noise, and more!
+- The Basics
+
+  - The [Web User Interface](WEB.md)
+
+    Guide to the Web interface. Also see the
+    [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
+
+  - The [Unified Canvas](UNIFIED_CANVAS.md)
+
+    Build complex scenes by combining and modifying multiple images in a
+    stepwise fashion. This feature combines img2img, inpainting and
+    outpainting in a single convenient digital artist-optimized user
+    interface.
+
+  - The [Command Line Interface (CLI)](CLI.md)
+
+    Scriptable access to InvokeAI's features.
+
+- Image Generation
+
+  - [Prompt Engineering](PROMPTS.md)
+
+    Get the images you want with the InvokeAI prompt engineering language.
+
+  - [Post-Processing](POSTPROCESS.md)
+
+    Restore mangled faces and make images larger with upscaling. Also see
+    the [Embiggen Upscaling Guide](EMBIGGEN.md).
+
+  - The [Concepts Library](CONCEPTS.md)
+
+    Add custom subjects and styles using HuggingFace's repository of
+    embeddings.
+
+  - [Image-to-Image Guide for the CLI](IMG2IMG.md)
+
+    Use a seed image to build new creations in the CLI.
+
+  - [Inpainting Guide for the CLI](INPAINTING.md)
+
+    Selectively erase and replace portions of an existing image in the CLI.
+
+  - [Outpainting Guide for the CLI](OUTPAINTING.md)
+
+    Extend the borders of the image with an "outcrop" function within the
+    CLI.
+
+  - [Generating Variations](VARIATIONS.md)
+
+    Have an image you like and want to generate many more like it?
+    Variations are the ticket.
+
+- Model Management
+
+  - [Model Installation](../installation/050_INSTALLING_MODELS.md)
+
+    Learn how to import third-party models and switch among them. This guide
+    also covers optimizing models to load quickly.
+
+  - [Merging Models](MODEL_MERGING.md)
+
+    Teach an old model new tricks. Merge 2-3 models together to create a new
+    model that combines characteristics of the originals.
+
+  - [Textual Inversion](TEXTUAL_INVERSION.md)
+
+    Personalize models by adding your own style or subjects.
+
+- Other Features
+
+  - [The NSFW Checker](NSFW.md)
+
+    Prevent InvokeAI from displaying unwanted racy images.
+
+  - [Miscellaneous](OTHER.md)
+
+    Run InvokeAI on Google Colab, generate images with repeating patterns,
+    batch process a file of prompts, increase the "creativity" of image
+    generation by adding initial noise, and more!
docs/help/IDE-Settings/index.md (new file, 4 lines)

@@ -0,0 +1,4 @@
+# :octicons-file-code-16: IDE-Settings
+
+Here we will share settings for the IDEs used by our developers; maybe you can
+find something interesting that will help to boost your development efficiency 🔥
docs/help/IDE-Settings/vs-code.md (new file, 250 lines)

@@ -0,0 +1,250 @@
+---
+title: Visual Studio Code
+---
+
+# :material-microsoft-visual-studio-code: Visual Studio Code
+
+The Workspace Settings are stored in the project (repository) root and take
+higher priority than your user settings.
+
+This helps to have different settings for different projects, while the user
+settings are used as a default value if no workspace settings are provided.
+
+## tasks.json
+
+First we will create a task configuration which will create a virtual
+environment and update the deps (pip, setuptools and wheel).
+
+Into this venv we will then install the pyproject.toml in editable mode with
+dev, docs and test dependencies.
+
+```json title=".vscode/tasks.json"
+{
+  // See https://go.microsoft.com/fwlink/?LinkId=733558
+  // for the documentation about the tasks.json format
+  "version": "2.0.0",
+  "tasks": [
+    {
+      "label": "Create virtual environment",
+      "detail": "Create .venv and upgrade pip, setuptools and wheel",
+      "command": "python3",
+      "args": [
+        "-m",
+        "venv",
+        ".venv",
+        "--prompt",
+        "InvokeAI",
+        "--upgrade-deps"
+      ],
+      "runOptions": {
+        "instanceLimit": 1,
+        "reevaluateOnRerun": true
+      },
+      "group": {
+        "kind": "build"
+      },
+      "presentation": {
+        "echo": true,
+        "reveal": "always",
+        "focus": false,
+        "panel": "shared",
+        "showReuseMessage": true,
+        "clear": false
+      }
+    },
+    {
+      "label": "build InvokeAI",
+      "detail": "Build pyproject.toml with extras dev, docs and test",
+      "command": "${workspaceFolder}/.venv/bin/python3",
+      "args": [
+        "-m",
+        "pip",
+        "install",
+        "--use-pep517",
+        "--editable",
+        ".[dev,docs,test]"
+      ],
+      "dependsOn": "Create virtual environment",
+      "dependsOrder": "sequence",
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "echo": true,
+        "reveal": "always",
+        "focus": false,
+        "panel": "shared",
+        "showReuseMessage": true,
+        "clear": false
+      }
+    }
+  ]
+}
+```
+
+The fastest way to build InvokeAI now is ++cmd+shift+b++
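For anyone working outside VS Code, those two tasks reduce to a pair of shell commands; a sketch of the equivalent on Linux/macOS paths:

```sh
python3 -m venv .venv --prompt InvokeAI --upgrade-deps                        # "Create virtual environment" task
.venv/bin/python3 -m pip install --use-pep517 --editable ".[dev,docs,test]"   # "build InvokeAI" task
```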
+## launch.json
+
+This file is used to define debugger configurations, so that you can one-click
+launch and monitor the application, set halt points to inspect specific states,
+and so on.
+
+```json title=".vscode/launch.json"
+{
+  "version": "0.2.0",
+  "configurations": [
+    {
+      "name": "invokeai web",
+      "type": "python",
+      "request": "launch",
+      "program": ".venv/bin/invokeai",
+      "justMyCode": true
+    },
+    {
+      "name": "invokeai cli",
+      "type": "python",
+      "request": "launch",
+      "program": ".venv/bin/invokeai",
+      "justMyCode": true
+    },
+    {
+      "name": "mkdocs serve",
+      "type": "python",
+      "request": "launch",
+      "program": ".venv/bin/mkdocs",
+      "args": ["serve"],
+      "justMyCode": true
+    }
+  ]
+}
+```
+
+Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
+you have created a virtual environment via the [tasks](#tasksjson) from the
+previous step.)
+
+## extensions.json
+
+A list of recommended vscode-extensions to make your life easier:
+
+```json title=".vscode/extensions.json"
+{
+  "recommendations": [
+    "editorconfig.editorconfig",
+    "github.vscode-pull-request-github",
+    "ms-python.black-formatter",
+    "ms-python.flake8",
+    "ms-python.isort",
+    "ms-python.python",
+    "ms-python.vscode-pylance",
+    "redhat.vscode-yaml",
+    "tamasfe.even-better-toml",
+    "eamodio.gitlens",
+    "foxundermoon.shell-format",
+    "timonwong.shellcheck",
+    "esbenp.prettier-vscode",
+    "davidanson.vscode-markdownlint",
+    "yzhang.markdown-all-in-one",
+    "bierner.github-markdown-preview",
+    "ms-azuretools.vscode-docker",
+    "mads-hartmann.bash-ide-vscode"
+  ]
+}
+```
+
+## settings.json
+
+With the settings below, your files already get formatted when you save them
+(only your modifications, if available), which will help you to not run into
+trouble with the pre-commit hooks. If the hooks fail, they will prevent you from
+committing, but most hooks directly add a fixed version, so that you just need
+to stage and commit them:
+
+```json title=".vscode/settings.json"
+{
+  "[json]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.quickSuggestions": {
+      "comments": false,
+      "strings": true,
+      "other": true
+    },
+    "editor.suggest.insertMode": "replace",
+    "gitlens.codeLens.scopes": ["document"]
+  },
+  "[jsonc]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "file"
+  },
+  "[toml]": {
+    "editor.defaultFormatter": "tamasfe.even-better-toml",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[yaml]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[markdown]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.rulers": [80],
+    "editor.unicodeHighlight.ambiguousCharacters": false,
+    "editor.unicodeHighlight.invisibleCharacters": false,
+    "diffEditor.ignoreTrimWhitespace": false,
+    "editor.wordWrap": "on",
+    "editor.quickSuggestions": {
+      "comments": "off",
+      "strings": "off",
+      "other": "off"
+    },
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[shellscript]": {
+    "editor.defaultFormatter": "foxundermoon.shell-format"
+  },
+  "[ignore]": {
+    "editor.defaultFormatter": "foxundermoon.shell-format"
+  },
+  "editor.rulers": [88],
+  "evenBetterToml.formatter.alignEntries": false,
+  "evenBetterToml.formatter.allowedBlankLines": 1,
+  "evenBetterToml.formatter.arrayAutoExpand": true,
+  "evenBetterToml.formatter.arrayTrailingComma": true,
+  "evenBetterToml.formatter.arrayAutoCollapse": true,
+  "evenBetterToml.formatter.columnWidth": 88,
+  "evenBetterToml.formatter.compactArrays": true,
+  "evenBetterToml.formatter.compactInlineTables": true,
+  "evenBetterToml.formatter.indentEntries": false,
+  "evenBetterToml.formatter.inlineTableExpand": true,
+  "evenBetterToml.formatter.reorderArrays": true,
+  "evenBetterToml.formatter.reorderKeys": true,
+  "evenBetterToml.formatter.compactEntries": false,
+  "evenBetterToml.schema.enabled": true,
+  "python.analysis.typeCheckingMode": "basic",
+  "python.formatting.provider": "black",
+  "python.languageServer": "Pylance",
+  "python.linting.enabled": true,
+  "python.linting.flake8Enabled": true,
+  "python.testing.unittestEnabled": false,
+  "python.testing.pytestEnabled": true,
+  "python.testing.pytestArgs": [
+    "tests",
+    "--cov=ldm",
+    "--cov-branch",
+    "--cov-report=term:skip-covered"
+  ],
+  "yaml.schemas": {
+    "https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
+  }
+}
+```
docs/help/contributing/010_PULL_REQUEST.md (new file, 135 lines)

@@ -0,0 +1,135 @@
+---
+title: Pull-Request
+---
+
+# :octicons-git-pull-request-16: Pull-Request
+
+## Prerequisites
+
+To follow the steps in this tutorial you will need:
+
+- a [GitHub](https://github.com) account
+- [git](https://git-scm.com/downloads) source control
+- a text / code editor (personally I prefer
+  [Visual Studio Code](https://code.visualstudio.com/Download))
+- a terminal:
+  - if you are on Linux/macOS you can use bash or zsh
+  - for Windows users the commands are written for PowerShell
+
+## Fork Repository
+
+The first step to be done if you want to contribute to InvokeAI is to fork the
+repository.
+
+Since you are already reading this doc, the easiest way to do so is by clicking
+[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
+[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork" button
+in the top right.
+
+## Clone your fork
+
+After you have forked the repository, you should clone it to your dev machine:
+
+=== ":fontawesome-brands-linux: Linux / :simple-apple: macOS"
+
+    ```sh
+    git clone https://github.com/<github username>/InvokeAI \
+        && cd InvokeAI
+    ```
+
+=== ":fontawesome-brands-windows: Windows"
+
+    ```powershell
+    git clone https://github.com/<github username>/InvokeAI `
+        && cd InvokeAI
+    ```
+
+## Install in Editable Mode
+
+To install InvokeAI in editable mode, (as always) we recommend creating and
+activating a venv first. Afterwards you can install the InvokeAI package,
+including the dev and docs extras, in editable mode, followed by the
+installation of the pre-commit hook:
+
+=== ":fontawesome-brands-linux: Linux / :simple-apple: macOS"
+
+    ```sh
+    python -m venv .venv \
+        --prompt InvokeAI \
+        --upgrade-deps \
+        && source .venv/bin/activate \
+        && pip install \
+            --upgrade \
+            --use-pep517 \
+            --editable=".[dev,docs]" \
+        && pre-commit install
+    ```
+
+=== ":fontawesome-brands-windows: Windows"
+
+    ```powershell
+    python -m venv .venv `
+        --prompt InvokeAI `
+        --upgrade-deps `
+        && .venv/scripts/activate.ps1 `
+        && pip install `
+            --upgrade `
+            --use-pep517 `
+            --editable=".[dev,docs]" `
+        && pre-commit install
+    ```
+
+## Create a branch
+
+Make sure you are on the main branch; from there, create your feature branch:
+
+=== ":fontawesome-brands-linux: Linux / :simple-apple: macOS"
+
+    ```sh
+    git checkout main \
+        && git pull \
+        && git checkout -B <branch name>
+    ```
+
+=== ":fontawesome-brands-windows: Windows"
+
+    ```powershell
+    git checkout main `
+        && git pull `
+        && git checkout -B <branch name>
+    ```
+
+## Commit your changes
+
+When you are done adding / updating content, you need to commit those changes
+to your repository before you can actually open a PR:
+
+```{ .sh .annotate }
+git add <files you have changed> # (1)!
+git commit -m "A commit message which describes your change"
+git push
+```
+
+1. Replace this with a space-separated list of the files you changed, like:
+   `README.md foo.sh bar.json baz`
+
+## Create a Pull Request
+
+After pushing your changes, you are ready to create a Pull Request. Just head
+over to your fork on [GitHub](https://github.com), which should already show you
+a message that there have been recent changes on your feature branch and a green
+button which you can use to create the PR.
+
+The default target for your PRs would be the main branch of
+[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI)
+
+Another way would be to create it in VS Code or via the GitHub CLI (or even via
+the GitHub CLI in a VS Code terminal window 🤭):
+
+```sh
+gh pr create
+```
+
+The CLI will inform you if there are still unpushed commits on your branch. It
+will also prompt you for things like the Title and the Body (Description) if
+you did not already pass them as arguments.
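If you would rather supply the pull-request metadata up front than answer the interactive prompts, `gh pr create` accepts flags for it; for example (title and body here are placeholders):

```sh
gh pr create \
  --base main \
  --title "docs: fix typos in the contributing guide" \
  --body "Describe what changed and why"
```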
docs/help/contributing/020_ISSUES.md (new file, 26 lines)

@@ -0,0 +1,26 @@
+---
+title: Issues
+---
+
+# :octicons-issue-opened-16: Issues
+
+## :fontawesome-solid-bug: Report a bug
+
+If you stumbled over a bug while using InvokeAI, we would appreciate it a lot if
+you
+[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
+to inform us about the details so that our developers can look into it.
+
+If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md) to
+find out how to create a Pull Request.
+
+## Request a feature
+
+If you have an idea for a new feature which you would like to see in
+InvokeAI, there is a
+[feature request](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
+available in the issues section of the repository.
+
+If you are just curious which features have already been requested, you can
+find the overview of open requests
+[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement)
docs/help/contributing/030_DOCS.md (new file, 32 lines)

@@ -0,0 +1,32 @@
+---
+title: docs
+---
+
+# :simple-readthedocs: MkDocs-Material
+
+If you want to contribute to the docs, there is an easy way to verify the
+results of your changes before committing them.
+
+Just follow the steps in the [Pull-Requests](010_PULL_REQUEST.md) docs, where we
+already
+[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).
+Once installed, it's as simple as:
+
+```sh
+mkdocs serve
+```
+
+This will build the docs locally and serve them on your local host; even
+auto-refresh is included, so you can just update a doc, save it and tab to the
+browser, without needing to restart `mkdocs serve`.
+
+More information about the "mkdocs flavored markdown syntax" can be found
+[here](https://squidfunk.github.io/mkdocs-material/reference/).
+
+## :material-microsoft-visual-studio-code: VS Code
+
+We also provide a
+[launch configuration for VS Code](../IDE-Settings/vs-code.md#launchjson) which
+includes a `mkdocs serve` entrypoint as well. You also don't have to worry about
+the formatting, since this is automated via prettier, but this is of course not
+limited to VS Code.
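Before opening a PR it can also be worth a one-shot strict build, which fails on warnings such as broken internal links instead of serving the site:

```sh
mkdocs build --strict
```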
docs/help/contributing/090_NODE_TRANSFORMATION.md (new file, 76 lines)

@@ -0,0 +1,76 @@
+# Transformation to nodes
+
+## Current state
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img| generate(generate);
+    web --> |txt2img| generate(generate);
+    cli --> |txt2img| generate(generate);
+    cli --> |img2img| generate(generate);
+    generate --> model_manager;
+    generate --> generators;
+    generate --> ti_manager[TI Manager];
+    generate --> etc;
+```
+
+## Transitional Architecture
+
+### first step
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img| img2img_node(Img2img node);
+    web --> |txt2img| generate(generate);
+    img2img_node --> model_manager;
+    img2img_node --> generators;
+    cli --> |txt2img| generate;
+    cli --> |img2img| generate;
+    generate --> model_manager;
+    generate --> generators;
+    generate --> ti_manager[TI Manager];
+    generate --> etc;
+```
+
+### second step
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img| img2img_node(img2img node);
+    img2img_node --> model_manager;
+    img2img_node --> generators;
+    web --> |txt2img| txt2img_node(txt2img node);
+    cli --> |txt2img| txt2img_node;
+    cli --> |img2img| generate(generate);
+    generate --> model_manager;
+    generate --> generators;
+    generate --> ti_manager[TI Manager];
+    generate --> etc;
+    txt2img_node --> model_manager;
+    txt2img_node --> generators;
+    txt2img_node --> ti_manager[TI Manager];
+```
+
+## Final Architecture
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img|img2img_node(img2img node);
+    cli --> |img2img|img2img_node;
+    web --> |txt2img|txt2img_node(txt2img node);
+    cli --> |txt2img|txt2img_node;
+    img2img_node --> model_manager;
+    txt2img_node --> model_manager;
+    img2img_node --> generators;
+    txt2img_node --> generators;
+    img2img_node --> ti_manager[TI Manager];
+    txt2img_node --> ti_manager[TI Manager];
+```
docs/help/contributing/index.md (new file, 16 lines)

@@ -0,0 +1,16 @@
+---
+title: Contributing
+---
+
+# :fontawesome-solid-code-commit: Contributing
+
+There are different ways you can contribute to
+[InvokeAI](https://github.com/invoke-ai/InvokeAI), like translations, opening
+issues for bugs, or ideas on how to improve.
+
+This section of the docs will explain some of the different ways you can
+contribute, to make it easier for newcomers as well as advanced users :nerd:
+
+If you want to contribute code but do not have an exact idea yet, take a
+look at the currently open
+[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
docs/help/index.md (new file, 12 lines)

@@ -0,0 +1,12 @@
+# :material-help: Help
+
+If you are looking for help with the installation of InvokeAI, please take a
+look at the [Installation](../installation/index.md) section of the docs.
+
+Here you will find help on topics like
+
+- how to contribute
+- configuration recommendations for IDEs
+
+If you have an idea about what's missing and aren't scared of contributing,
+just take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do so.
295
docs/index.md
295
docs/index.md
@ -2,6 +2,8 @@
|
|||||||
title: Home
|
title: Home
|
||||||
---
|
---
|
||||||
|
|
||||||
|
# :octicons-home-16: Home
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
|
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
|
||||||
|
|
||||||
@ -29,36 +31,36 @@ title: Home
|
|||||||
[![github open prs badge]][github open prs link]
|
[![github open prs badge]][github open prs link]
|
||||||
|
|
||||||
[ci checks on dev badge]:
|
[ci checks on dev badge]:
|
||||||
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
|
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
|
||||||
[ci checks on dev link]:
|
[ci checks on dev link]:
|
||||||
https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
|
https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
|
||||||
[ci checks on main badge]:
|
[ci checks on main badge]:
|
||||||
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||||
[ci checks on main link]:
|
[ci checks on main link]:
|
||||||
https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
||||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||||
[github forks badge]:
|
[github forks badge]:
|
||||||
https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||||
[github forks link]:
|
[github forks link]:
|
||||||
https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
|
https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
|
||||||
[github open issues badge]:
|
[github open issues badge]:
|
||||||
https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
|
https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
|
||||||
[github open issues link]:
|
[github open issues link]:
|
||||||
https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
|
https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
|
||||||
[github open prs badge]:
|
[github open prs badge]:
|
||||||
https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
|
https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
|
||||||
[github open prs link]:
|
[github open prs link]:
|
||||||
https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
|
https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
|
||||||
[github stars badge]:
|
[github stars badge]:
|
||||||
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
||||||
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
||||||
[latest commit to dev badge]:
|
[latest commit to dev badge]:
|
||||||
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||||
[latest commit to dev link]:
|
[latest commit to dev link]:
|
||||||
https://github.com/invoke-ai/InvokeAI/commits/development
|
https://github.com/invoke-ai/InvokeAI/commits/development
|
||||||
[latest release badge]:
|
[latest release badge]:
|
||||||
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
@ -87,24 +89,24 @@ Q&A</a>]
|
|||||||
|
|
||||||
You wil need one of the following:
|
You wil need one of the following:
|
||||||
|
|
||||||
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||||
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
||||||
only)
|
only)
|
||||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||||
|
|
||||||
We do **not recommend** the following video cards due to issues with their
|
We do **not recommend** the following video cards due to issues with their
|
||||||
running in half-precision mode and having insufficient VRAM to render 512x512
|
running in half-precision mode and having insufficient VRAM to render 512x512
|
||||||
images in full-precision mode:
|
images in full-precision mode:
|
||||||
|
|
||||||
- NVIDIA 10xx series cards such as the 1080ti
|
- NVIDIA 10xx series cards such as the 1080ti
|
||||||
- GTX 1650 series cards
|
- GTX 1650 series cards
|
||||||
- GTX 1660 series cards
|
- GTX 1660 series cards
|
||||||
|
|
||||||
### :fontawesome-solid-memory: Memory and Disk
|
### :fontawesome-solid-memory: Memory and Disk
|
||||||
|
|
||||||
- At least 12 GB Main Memory RAM.
|
- At least 12 GB Main Memory RAM.
|
||||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
- At least 18 GB of free disk space for the machine learning model, Python,
|
||||||
all its dependencies.
|
and all its dependencies.
|
||||||
|
|
||||||
## :octicons-package-dependencies-24: Installation
|
## :octicons-package-dependencies-24: Installation
|
||||||
|
|
||||||
@ -113,48 +115,65 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
|||||||
driver).
|
driver).
|
||||||
|
|
||||||
### [Installation Getting Started Guide](installation)
|
### [Installation Getting Started Guide](installation)
|
||||||
|
|
||||||
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||||
|
|
||||||
This method is recommended for 1st time users
|
This method is recommended for 1st time users
|
||||||
|
|
||||||
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||||
|
|
||||||
This method is recommended for experienced users and developers
|
This method is recommended for experienced users and developers
|
||||||
|
|
||||||
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||||
|
|
||||||
This method is recommended for those familiar with running Docker containers
|
This method is recommended for those familiar with running Docker containers
|
||||||
|
|
||||||
### Other Installation Guides
|
### Other Installation Guides
|
||||||
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
|
||||||
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||||
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||||
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||||
|
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||||
|
|
||||||
## :octicons-gift-24: InvokeAI Features
|
## :octicons-gift-24: InvokeAI Features
|
||||||
|
|
||||||
### The InvokeAI Web Interface
|
### The InvokeAI Web Interface
|
||||||
- [WebUI overview](features/WEB.md)
|
|
||||||
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
- [WebUI overview](features/WEB.md)
|
||||||
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||||
|
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||||
<!-- separator -->
|
<!-- separator -->
|
||||||
|
|
||||||
### The InvokeAI Command Line Interface
|
### The InvokeAI Command Line Interface
|
||||||
- [Command Line Interace Reference Guide](features/CLI.md)
|
|
||||||
|
- [Command Line Interace Reference Guide](features/CLI.md)
|
||||||
<!-- separator -->
|
<!-- separator -->
|
||||||
|
|
||||||
### Image Management
|
### Image Management
|
||||||
- [Image2Image](features/IMG2IMG.md)
|
|
||||||
- [Inpainting](features/INPAINTING.md)
|
- [Image2Image](features/IMG2IMG.md)
|
||||||
- [Outpainting](features/OUTPAINTING.md)
|
- [Inpainting](features/INPAINTING.md)
|
||||||
- [Adding custom styles and subjects](features/CONCEPTS.md)
|
- [Outpainting](features/OUTPAINTING.md)
|
||||||
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
- [Adding custom styles and subjects](features/CONCEPTS.md)
|
||||||
- [Embiggen upscaling](features/EMBIGGEN.md)
|
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
||||||
- [Other Features](features/OTHER.md)
|
- [Embiggen upscaling](features/EMBIGGEN.md)
|
||||||
|
- [Other Features](features/OTHER.md)
|
||||||
|
|
||||||
<!-- separator -->
|
<!-- separator -->
|
||||||
|
|
||||||
### Model Management
|
### Model Management
|
||||||
- [Installing](installation/050_INSTALLING_MODELS.md)
|
|
||||||
- [Model Merging](features/MODEL_MERGING.md)
|
- [Installing](installation/050_INSTALLING_MODELS.md)
|
||||||
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
- [Model Merging](features/MODEL_MERGING.md)
|
||||||
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
|
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
||||||
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
|
||||||
|
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
||||||
<!-- seperator -->
|
<!-- seperator -->
|
||||||
|
|
||||||
### Prompt Engineering
|
### Prompt Engineering
|
||||||
- [Prompt Syntax](features/PROMPTS.md)
|
|
||||||
- [Generating Variations](features/VARIATIONS.md)
|
- [Prompt Syntax](features/PROMPTS.md)
|
||||||
|
- [Generating Variations](features/VARIATIONS.md)
|
||||||
|
|
||||||
## :octicons-log-16: Latest Changes
|
## :octicons-log-16: Latest Changes
|
||||||
|
|
||||||
@ -162,84 +181,188 @@ This method is recommended for those familiar with running Docker containers
|
|||||||
|
|
||||||
#### Migration to Stable Diffusion `diffusers` models

Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint" or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models. A new format, introduced by StabilityAI in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer-term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.
When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part.
To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are:
- `!convert_model` -- Take the path to a local checkpoint file or a URL that points to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file.
- `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file.
- `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically.
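As a rough sketch, a conversion session at the CLI's `invoke>` prompt might look like the following (the checkpoint path, short name, and HuggingFace model ID are illustrative, not part of the release):

```bash
# at the invoke> prompt of the invokeai command-line client
invoke> !convert_model /path/to/some-model.ckpt
invoke> !optimize_model some-model
invoke> !import_model prompthero/openjourney
```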
The WebGUI offers similar functionality for model management.
For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert <path to directory>` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
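For instance (the directory path is illustrative):

```bash
# scan a folder of checkpoint files and auto-convert them at launch
invokeai --autoconvert /path/to/checkpoint-folder

# or convert already-registered checkpoints to diffusers on the fly
invokeai --ckpt_convert
```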
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.
#### Support for the `XFormers` Memory-Efficient Crossattention Package

On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the `xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion model files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.
The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.
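A minimal sketch of the tail of such an `invokeai.init` file, assuming one launch option per line (the `--outdir` line is illustrative):

```bash
# invokeai/invokeai.init -- each line is a startup option
--outdir="outputs"
--no-xformers
```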
#### A Negative Prompt Box in the WebUI

There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.
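For example, this prompt supplies the same negative terms inline at the CLI's `invoke>` prompt (the prompt text is illustrative):

```bash
invoke> portrait of a knight in armor [mangled limbs, bad anatomy]
```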
To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
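For example:

```bash
# launch with tokenization logging enabled for all prompts
invokeai --log_tokenization
```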
#### Model Merging

Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use it, each of the models must already be imported into InvokeAI and saved in `diffusers` format. Then launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.
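From a terminal, the merge interface can also be started directly:

```bash
invokeai-merge --gui
```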
See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.
#### Textual Inversion Training

Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, the subject or style will be activated by including `<pointillist-style>` in your prompt.
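For example, after training a trigger phrase named `pointillist-style`, a prompt might read (the prompt text is illustrative):

```bash
invoke> a quiet harbor at dawn in <pointillist-style>
```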
Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training, launch it from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.
#### A New Installer Experience

The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPI project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.
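A minimal sketch of such an install in a fresh virtual environment (the environment name is illustrative):

```bash
python -m venv .venv
source .venv/bin/activate
pip install InvokeAI --use-pep517
```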
Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.
#### Command-line name changes

All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:
- `invokeai` -- Command-line client
- `invokeai --web` -- Web GUI
- `invokeai-merge --gui` -- Model merging script with graphical front end
- `invokeai-ti --gui` -- Textual inversion script with graphical front end
- `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models
For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.
Developers should be aware that the locations of the scripts' source code have been moved. The new locations are:

- `invokeai` => `ldm/invoke/CLI.py`
- `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
- `invokeai-ti` => `ldm/invoke/training/textual_inversion.py`
- `invokeai-merge` => `ldm/invoke/merge_diffusers`
Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.
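A sketch of that editable-install workflow (the clone location is illustrative):

```bash
git clone https://github.com/invoke-ai/InvokeAI.git
cd InvokeAI
pip install -e . --use-pep517
invokeai --web   # invoke the script by its 2.3.0 name
```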
Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details. For older changelogs, please visit the **[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting

Please check out our **[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** to get solutions for common installation problems and other issues.
## :octicons-repo-push-24: Contributing

@@ -265,8 +388,8 @@ thank them for their time, hard work and effort.

For support, please use this repository's GitHub Issues tracking service. Feel free to send me an email if you use and like the script.

Original portions of the software are Copyright (c) 2022-23 by [The InvokeAI Team](https://github.com/invoke-ai).

## :octicons-book-24: Further Reading
@@ -1,5 +0,0 @@
-mkdocs
-mkdocs-material>=8, <9
-mkdocs-git-revision-date-localized-plugin
-mkdocs-redirects==1.2.0
@@ -1,5 +1,8 @@
#!/bin/bash

# coauthored by Lincoln Stein, Eugene Brodsky and JoshuaKimsey
# Copyright 2023, The InvokeAI Development Team

####
# This launch script assumes that:
# 1. it is located in the runtime directory,
@@ -18,78 +21,135 @@ cd "$scriptdir"
. .venv/bin/activate

export INVOKEAI_ROOT="$scriptdir"
PARAMS=$@

# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

do_choice() {
    case $1 in
        1)
            echo "Generate images with a browser-based interface"
            clear
            invokeai --web $PARAMS
            ;;
        2)
            echo "Generate images using a command-line interface"
            clear
            invokeai $PARAMS
            ;;
        3)
            echo "Textual inversion training"
            clear
            invokeai-ti --gui $PARAMS
            ;;
        4)
            echo "Merge models (diffusers type only)"
            clear
            invokeai-merge --gui $PARAMS
            ;;
        5)
            echo "Download and install models"
            clear
            invokeai-model-install --root ${INVOKEAI_ROOT}
            ;;
        6)
            echo "Change InvokeAI startup options"
            clear
            invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
            ;;
        7)
            echo "Re-run the configure script to fix a broken install"
            clear
            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
            ;;
        8)
            echo "Open the developer console"
            clear
            file_name=$(basename "${BASH_SOURCE[0]}")
            bash --init-file "$file_name"
            ;;
        9)
            echo "Update InvokeAI"
            clear
            invokeai-update
            ;;
        10)
            echo "Command-line help"
            clear
            invokeai --help
            ;;
        *)
            echo "Exiting..."
            exit
            ;;
    esac
    clear
}

do_dialog() {
    while true
    do
        options=(
            1 "Generate images with a browser-based interface"
            2 "Generate images using a command-line interface"
            3 "Textual inversion training"
            4 "Merge models (diffusers type only)"
            5 "Download and install models"
            6 "Change InvokeAI startup options"
            7 "Re-run the configure script to fix a broken install"
            8 "Open the developer console"
            9 "Update InvokeAI"
            10 "Command-line help")

        choice=$(dialog --clear \
            --backtitle "InvokeAI" \
            --title "What would you like to run?" \
            --menu "Select an option:" \
            0 0 0 \
            "${options[@]}" \
            2>&1 >/dev/tty) || clear
        do_choice "$choice"
    done
    clear
}

do_line_input() {
    echo " ** For a more attractive experience, please install the 'dialog' utility. **"
    echo ""
    while true
    do
        echo "Do you want to generate images using the"
        echo "1. browser-based UI"
        echo "2. command-line interface"
        echo "3. run textual inversion training"
        echo "4. merge models (diffusers type only)"
        echo "5. download and install models"
        echo "6. change InvokeAI startup options"
        echo "7. re-run the configure script to fix a broken install"
        echo "8. open the developer console"
        echo "9. update InvokeAI"
        echo "10. command-line help"
        echo "Q - Quit"
        echo ""
        read -p "Please enter 1-10, Q: [1] " yn
        choice=${yn:='1'}
        do_choice $choice
    done
}

if [ "$0" != "bash" ]; then
    # Dialog seems to be a standard installation for most Linux distros, but this checks to ensure it is present regardless
    if command -v dialog &> /dev/null ; then
        do_dialog
    else
        do_line_input
    fi
else # in developer console
    python --version
    echo "Press ^D to exit"
    export PS1="(InvokeAI) \u@\h \w> "
fi
@@ -13,14 +13,19 @@ sd-inpainting-1.5:
   vae:
     repo_id: stabilityai/sd-vae-ft-mse
   recommended: True
-stable-diffusion-2.1:
+stable-diffusion-2.1-768:
   description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
   repo_id: stabilityai/stable-diffusion-2-1
   format: diffusers
   recommended: True
+stable-diffusion-2.1-base:
+  description: Stable Diffusion version 2.1 diffusers model, trained on 512 pixel images (5.21 GB)
+  repo_id: stabilityai/stable-diffusion-2-1-base
+  format: diffusers
+  recommended: False
 sd-inpainting-2.0:
   description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
-  repo_id: stabilityai/stable-diffusion-2-1
+  repo_id: stabilityai/stable-diffusion-2-inpainting
   format: diffusers
   recommended: False
 analog-diffusion-1.0:
67 invokeai/configs/stable-diffusion/v2-inference.yaml Normal file
@@ -0,0 +1,67 @@
model:
  base_learning_rate: 1.0e-4
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False # we set this to false because this is an inference only config

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        use_checkpoint: True
        use_fp16: True
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64 # need to fix for flash-attn
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          #attn_type: "vanilla-xformers"
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
      params:
        freeze: True
        layer: "penultimate"
@@ -1,6 +1,7 @@
 module.exports = {
   trailingComma: 'es5',
   tabWidth: 2,
+  endOfLine: 'auto',
   semi: true,
   singleQuote: true,
   overrides: [
File diff suppressed because one or more lines are too long

2 invokeai/frontend/dist/index.html vendored
@@ -5,7 +5,7 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index-0e39fbc4.js"></script>
+    <script type="module" crossorigin src="./assets/index-c09cf9ca.js"></script>
     <link rel="stylesheet" href="./assets/index-14cb2922.css">
   </head>
10 invokeai/frontend/dist/locales/en.json vendored
@@ -63,7 +63,8 @@
     "statusConvertingModel": "Converting Model",
     "statusModelConverted": "Model Converted",
     "statusMergingModels": "Merging Models",
-    "statusMergedModels": "Models Merged"
+    "statusMergedModels": "Models Merged",
+    "pinOptionsPanel": "Pin Options Panel"
   },
   "gallery": {
     "generations": "Generations",
@@ -364,7 +365,8 @@
     "convertToDiffusersHelpText6": "Do you wish to convert this model?",
     "convertToDiffusersSaveLocation": "Save Location",
     "v1": "v1",
-    "v2": "v2",
+    "v2_base": "v2 (512px)",
+    "v2_768": "v2 (768px)",
     "inpainting": "v1 Inpainting",
     "customConfig": "Custom Config",
     "pathToCustomConfig": "Path To Custom Config",
@@ -393,7 +395,9 @@
     "modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
     "inverseSigmoid": "Inverse Sigmoid",
     "sigmoid": "Sigmoid",
-    "weightedSum": "Weighted Sum"
+    "weightedSum": "Weighted Sum",
+    "none": "none",
+    "addDifference": "Add Difference"
   },
   "parameters": {
     "general": "General",
115 invokeai/frontend/dist/locales/es.json vendored
@@ -15,7 +15,7 @@
     "langSpanish": "Español",
     "nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
     "postProcessing": "Post-procesamiento",
-    "postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador",
+    "postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador.",
     "postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
     "postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
     "training": "Entrenamiento",
@@ -44,7 +44,26 @@
     "statusUpscaling": "Aumentando Tamaño",
     "statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
     "statusLoadingModel": "Cargando Modelo",
-    "statusModelChanged": "Modelo cambiado"
+    "statusModelChanged": "Modelo cambiado",
+    "statusMergedModels": "Modelos combinados",
+    "githubLabel": "Github",
+    "discordLabel": "Discord",
+    "langEnglish": "Inglés",
+    "langDutch": "Holandés",
+    "langFrench": "Francés",
+    "langGerman": "Alemán",
+    "langItalian": "Italiano",
+    "langArabic": "Árabe",
+    "langJapanese": "Japones",
+    "langPolish": "Polaco",
+    "langBrPortuguese": "Portugués brasileño",
+    "langRussian": "Ruso",
+    "langSimplifiedChinese": "Chino simplificado",
+    "langUkranian": "Ucraniano",
+    "back": "Atrás",
+    "statusConvertingModel": "Convertir el modelo",
+    "statusModelConverted": "Modelo adaptado",
+    "statusMergingModels": "Fusionar modelos"
   },
   "gallery": {
     "generations": "Generaciones",
@@ -284,16 +303,16 @@
     "nameValidationMsg": "Introduce un nombre para tu modelo",
     "description": "Descripción",
     "descriptionValidationMsg": "Introduce una descripción para tu modelo",
-    "config": "Config",
-    "configValidationMsg": "Ruta del archivo de configuración del modelo",
+    "config": "Configurar",
+    "configValidationMsg": "Ruta del archivo de configuración del modelo.",
     "modelLocation": "Ubicación del Modelo",
-    "modelLocationValidationMsg": "Ruta del archivo de modelo",
+    "modelLocationValidationMsg": "Ruta del archivo de modelo.",
     "vaeLocation": "Ubicación VAE",
-    "vaeLocationValidationMsg": "Ruta del archivo VAE",
+    "vaeLocationValidationMsg": "Ruta del archivo VAE.",
     "width": "Ancho",
-    "widthValidationMsg": "Ancho predeterminado de tu modelo",
+    "widthValidationMsg": "Ancho predeterminado de tu modelo.",
     "height": "Alto",
-    "heightValidationMsg": "Alto predeterminado de tu modelo",
+    "heightValidationMsg": "Alto predeterminado de tu modelo.",
     "addModel": "Añadir Modelo",
     "updateModel": "Actualizar Modelo",
     "availableModels": "Modelos disponibles",
@@ -320,7 +339,61 @@
     "deleteModel": "Eliminar Modelo",
     "deleteConfig": "Eliminar Configuración",
     "deleteMsg1": "¿Estás seguro de querer eliminar esta entrada de modelo de InvokeAI?",
-    "deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas."
+    "deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas.",
+    "safetensorModels": "SafeTensors",
+    "addDiffuserModel": "Añadir difusores",
+    "inpainting": "v1 Repintado",
+    "repoIDValidationMsg": "Repositorio en línea de tu modelo",
+    "checkpointModels": "Puntos de control",
+    "convertToDiffusersHelpText4": "Este proceso se realiza una sola vez. Puede tardar entre 30 y 60 segundos dependiendo de las especificaciones de tu ordenador.",
+    "diffusersModels": "Difusores",
+    "addCheckpointModel": "Agregar modelo de punto de control/Modelo Safetensor",
+    "vaeRepoID": "Identificador del repositorio de VAE",
+    "vaeRepoIDValidationMsg": "Repositorio en línea de tú VAE",
+    "formMessageDiffusersModelLocation": "Difusores Modelo Ubicación",
+    "formMessageDiffusersModelLocationDesc": "Por favor, introduzca al menos uno.",
+    "formMessageDiffusersVAELocation": "Ubicación VAE",
+    "formMessageDiffusersVAELocationDesc": "Si no se proporciona, InvokeAI buscará el archivo VAE dentro de la ubicación del modelo indicada anteriormente.",
+    "convert": "Convertir",
+    "convertToDiffusers": "Convertir en difusores",
+    "convertToDiffusersHelpText1": "Este modelo se convertirá al formato 🧨 Difusores.",
+    "convertToDiffusersHelpText2": "Este proceso sustituirá su entrada del Gestor de Modelos por la versión de Difusores del mismo modelo.",
+    "convertToDiffusersHelpText3": "Su archivo de puntos de control en el disco NO será borrado ni modificado de ninguna manera. Puede volver a añadir su punto de control al Gestor de Modelos si lo desea.",
+    "convertToDiffusersHelpText5": "Asegúrese de que dispone de suficiente espacio en disco. Los modelos suelen variar entre 4 GB y 7 GB de tamaño.",
+    "convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
+    "convertToDiffusersSaveLocation": "Guardar ubicación",
+    "v1": "v1",
+    "v2": "v2",
+    "statusConverting": "Adaptar",
+    "modelConverted": "Modelo adaptado",
+    "sameFolder": "La misma carpeta",
+    "invokeRoot": "Carpeta InvokeAI",
+    "custom": "Personalizado",
+    "customSaveLocation": "Ubicación personalizada para guardar",
+    "merge": "Fusión",
+    "modelsMerged": "Modelos fusionados",
+    "mergeModels": "Combinar modelos",
+    "modelOne": "Modelo 1",
+    "modelTwo": "Modelo 2",
+    "modelThree": "Modelo 3",
+    "mergedModelName": "Nombre del modelo combinado",
+    "alpha": "Alfa",
+    "interpolationType": "Tipo de interpolación",
+    "mergedModelSaveLocation": "Guardar ubicación",
+    "mergedModelCustomSaveLocation": "Ruta personalizada",
+    "invokeAIFolder": "Invocar carpeta de la inteligencia artificial",
+    "modelMergeHeaderHelp2": "Sólo se pueden fusionar difusores. Si desea fusionar un modelo de punto de control, conviértalo primero en difusores.",
+    "modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
+    "modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
+    "ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
+    "modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.",
+    "inverseSigmoid": "Sigmoideo inverso",
+    "weightedSum": "Modelo de suma ponderada",
+    "sigmoid": "Función sigmoide",
+    "allModels": "Todos los modelos",
+    "repo_id": "Identificador del repositorio",
+    "pathToCustomConfig": "Ruta a la configuración personalizada",
+    "customConfig": "Configuración personalizada"
   },
   "parameters": {
     "images": "Imágenes",
@@ -380,7 +453,22 @@
     "info": "Información",
     "deleteImage": "Eliminar Imagen",
     "initialImage": "Imagen Inicial",
-    "showOptionsPanel": "Mostrar panel de opciones"
+    "showOptionsPanel": "Mostrar panel de opciones",
+    "symmetry": "Simetría",
+    "vSymmetryStep": "Paso de simetría V",
+    "hSymmetryStep": "Paso de simetría H",
+    "cancel": {
+      "immediate": "Cancelar inmediatamente",
+      "schedule": "Cancelar tras la iteración actual",
+      "isScheduled": "Cancelando",
+      "setType": "Tipo de cancelación"
+    },
+    "copyImage": "Copiar la imagen",
+    "general": "General",
+    "negativePrompts": "Preguntas negativas",
+    "imageToImage": "Imagen a imagen",
+    "denoisingStrength": "Intensidad de la eliminación del ruido",
+    "hiresStrength": "Alta resistencia"
   },
   "settings": {
     "models": "Modelos",
@@ -393,7 +481,8 @@
     "resetWebUI": "Restablecer interfaz web",
     "resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.",
     "resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.",
-    "resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla."
+    "resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla.",
+    "useSlidersForAll": "Utilice controles deslizantes para todas las opciones"
   },
   "toast": {
     "tempFoldersEmptied": "Directorio temporal vaciado",
@@ -431,12 +520,12 @@
   "feature": {
     "prompt": "Este campo tomará todo el texto de entrada, incluidos tanto los términos de contenido como los estilísticos. Si bien se pueden incluir pesos en la solicitud, los comandos/parámetros estándar de línea de comandos no funcionarán.",
     "gallery": "Conforme se generan nuevas invocaciones, los archivos del directorio de salida se mostrarán aquí. Las generaciones tienen opciones adicionales para configurar nuevas generaciones.",
-    "other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. El modo sin costuras funciona para generar patrones repetitivos en la salida. La optimización de alta resolución realiza un ciclo de generación de dos pasos y debe usarse en resoluciones más altas cuando desee una imagen/composición más coherente.",
+    "other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. 'Seamless mosaico' creará patrones repetitivos en la salida. 'Alta resolución' es la generación en dos pasos con img2img: use esta configuración cuando desee una imagen más grande y más coherente sin artefactos. tomar más tiempo de lo habitual txt2img.",
     "seed": "Los valores de semilla proporcionan un conjunto inicial de ruido que guían el proceso de eliminación de ruido y se pueden aleatorizar o rellenar con una semilla de una invocación anterior. La función Umbral se puede usar para mitigar resultados indeseables a valores CFG más altos (intente entre 0-10), y Perlin se puede usar para agregar ruido Perlin al proceso de eliminación de ruido. Ambos sirven para agregar variación a sus salidas.",
     "variations": "Pruebe una variación con una cantidad entre 0 y 1 para cambiar la imagen de salida para la semilla establecida. Se encuentran variaciones interesantes en la semilla entre 0.1 y 0.3.",
     "upscale": "Usando ESRGAN, puede aumentar la resolución de salida sin requerir un ancho/alto más alto en la generación inicial.",
     "faceCorrection": "Usando GFPGAN o Codeformer, la corrección de rostros intentará identificar rostros en las salidas y corregir cualquier defecto/anormalidad. Los valores de fuerza más altos aplicarán una presión correctiva más fuerte en las salidas, lo que resultará en rostros más atractivos. Con Codeformer, una mayor fidelidad intentará preservar la imagen original, a expensas de la fuerza de corrección de rostros.",
-    "imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75.",
+    "imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75",
     "boundingBox": "La caja delimitadora es análoga a las configuraciones de Ancho y Alto para Texto a Imagen o Imagen a Imagen. Solo se procesará el área en la caja.",
     "seamCorrection": "Controla el manejo de parches visibles que pueden ocurrir cuando se pega una imagen generada de nuevo en el lienzo.",
     "infillAndScaling": "Administra los métodos de relleno (utilizados en áreas enmascaradas o borradas del lienzo) y la escala (útil para tamaños de caja delimitadora pequeños)."
76 invokeai/frontend/dist/locales/pt_BR.json vendored
@@ -44,7 +44,26 @@
     "statusUpscaling": "Redimensinando",
     "statusUpscalingESRGAN": "Redimensinando (ESRGAN)",
     "statusLoadingModel": "Carregando Modelo",
-    "statusModelChanged": "Modelo Alterado"
+    "statusModelChanged": "Modelo Alterado",
+    "githubLabel": "Github",
+    "discordLabel": "Discord",
+    "langArabic": "Árabe",
+    "langEnglish": "Inglês",
+    "langDutch": "Holandês",
+    "langFrench": "Francês",
+    "langGerman": "Alemão",
+    "langItalian": "Italiano",
+    "langJapanese": "Japonês",
+    "langPolish": "Polonês",
+    "langSimplifiedChinese": "Chinês",
+    "langUkranian": "Ucraniano",
+    "back": "Voltar",
+    "statusConvertingModel": "Convertendo Modelo",
+    "statusModelConverted": "Modelo Convertido",
+    "statusMergingModels": "Mesclando Modelos",
+    "statusMergedModels": "Modelos Mesclados",
+    "langRussian": "Russo",
+    "langSpanish": "Espanhol"
   },
   "gallery": {
     "generations": "Gerações",
@@ -237,7 +256,7 @@
       "desc": "Salva a tela atual na galeria"
     },
     "copyToClipboard": {
-      "title": "Copiar Para a Área de Transferência ",
+      "title": "Copiar para a Área de Transferência",
       "desc": "Copia a tela atual para a área de transferência"
     },
     "downloadImage": {
@@ -284,7 +303,7 @@
     "nameValidationMsg": "Insira um nome para o seu modelo",
     "description": "Descrição",
     "descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
-    "config": "Config",
+    "config": "Configuração",
     "configValidationMsg": "Caminho para o arquivo de configuração do seu modelo.",
     "modelLocation": "Localização do modelo",
     "modelLocationValidationMsg": "Caminho para onde seu modelo está localizado.",
@@ -317,7 +336,52 @@
     "deleteModel": "Excluir modelo",
     "deleteConfig": "Excluir Config",
     "deleteMsg1": "Tem certeza de que deseja excluir esta entrada do modelo de InvokeAI?",
-    "deleteMsg2": "Isso não vai excluir o arquivo de modelo checkpoint do seu disco. Você pode lê-los, se desejar."
+    "deleteMsg2": "Isso não vai excluir o arquivo de modelo checkpoint do seu disco. Você pode lê-los, se desejar.",
+    "checkpointModels": "Checkpoints",
+    "diffusersModels": "Diffusers",
+    "safetensorModels": "SafeTensors",
+    "addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
+    "addDiffuserModel": "Adicionar Diffusers",
+    "repo_id": "Repo ID",
+    "vaeRepoID": "VAE Repo ID",
+    "vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
+    "scanAgain": "Digitalize Novamente",
+    "selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
+    "noModelsFound": "Nenhum Modelo Encontrado",
+    "formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
+    "formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
+    "formMessageDiffusersVAELocation": "Localização do VAE",
+    "formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo arquivo VAE dentro do local do modelo.",
+    "convertToDiffusers": "Converter para Diffusers",
+    "convertToDiffusersHelpText1": "Este modelo será convertido para o formato 🧨 Diffusers.",
+    "convertToDiffusersHelpText5": "Por favor, certifique-se de que você tenha espaço suficiente em disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
+    "convertToDiffusersHelpText6": "Você deseja converter este modelo?",
+    "convertToDiffusersSaveLocation": "Local para Salvar",
+    "v1": "v1",
+    "v2": "v2",
+    "inpainting": "v1 Inpainting",
+    "customConfig": "Configuração personalizada",
+    "pathToCustomConfig": "Caminho para configuração personalizada",
+    "convertToDiffusersHelpText3": "Seu arquivo de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Você pode adicionar seu ponto de verificação ao Gerenciador de modelos novamente, se desejar.",
+    "convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, dependendo das especificações do seu computador.",
+    "merge": "Mesclar",
+    "modelsMerged": "Modelos mesclados",
+    "mergeModels": "Mesclar modelos",
+    "modelOne": "Modelo 1",
+    "modelTwo": "Modelo 2",
+    "modelThree": "Modelo 3",
+    "statusConverting": "Convertendo",
+    "modelConverted": "Modelo Convertido",
+    "sameFolder": "Mesma pasta",
+    "invokeRoot": "Pasta do InvokeAI",
+    "custom": "Personalizado",
+    "customSaveLocation": "Local de salvamento personalizado",
+    "mergedModelName": "Nome do modelo mesclado",
+    "alpha": "Alpha",
+    "allModels": "Todos os Modelos",
+    "repoIDValidationMsg": "Repositório Online do seu Modelo",
+    "convert": "Converter",
+    "convertToDiffusersHelpText2": "Este processo irá substituir sua entrada de Gerenciador de Modelos por uma versão Diffusers do mesmo modelo."
   },
   "parameters": {
     "images": "Imagems",
@@ -442,14 +506,14 @@
     "move": "Mover",
     "resetView": "Resetar Visualização",
     "mergeVisible": "Fundir Visível",
-    "saveToGallery": "Save To Gallery",
+    "saveToGallery": "Salvar na Galeria",
     "copyToClipboard": "Copiar para a Área de Transferência",
     "downloadAsImage": "Baixar Como Imagem",
     "undo": "Desfazer",
     "redo": "Refazer",
     "clearCanvas": "Limpar Tela",
     "canvasSettings": "Configurações de Tela",
-    "showIntermediates": "Show Intermediates",
+    "showIntermediates": "Mostrar Intermediários",
|
||||||
"showGrid": "Mostrar Grade",
|
"showGrid": "Mostrar Grade",
|
||||||
"snapToGrid": "Encaixar na Grade",
|
"snapToGrid": "Encaixar na Grade",
|
||||||
"darkenOutsideSelection": "Escurecer Seleção Externa",
|
"darkenOutsideSelection": "Escurecer Seleção Externa",
|
||||||
invokeai/frontend/dist/locales/ro.json (new file, vendored, 1 line)
@@ -0,0 +1 @@
+{}
@@ -63,7 +63,8 @@
 "statusConvertingModel": "Converting Model",
 "statusModelConverted": "Model Converted",
 "statusMergingModels": "Merging Models",
-"statusMergedModels": "Models Merged"
+"statusMergedModels": "Models Merged",
+"pinOptionsPanel": "Pin Options Panel"
 },
 "gallery": {
 "generations": "Generations",
@@ -364,7 +365,8 @@
 "convertToDiffusersHelpText6": "Do you wish to convert this model?",
 "convertToDiffusersSaveLocation": "Save Location",
 "v1": "v1",
-"v2": "v2",
+"v2_base": "v2 (512px)",
+"v2_768": "v2 (768px)",
 "inpainting": "v1 Inpainting",
 "customConfig": "Custom Config",
 "pathToCustomConfig": "Path To Custom Config",
@@ -393,7 +395,9 @@
 "modelMergeInterpAddDifferenceHelp": "In this mode, Model 3 is first subtracted from Model 2. The resulting version is blended with Model 1 with the alpha rate set above.",
 "inverseSigmoid": "Inverse Sigmoid",
 "sigmoid": "Sigmoid",
-"weightedSum": "Weighted Sum"
+"weightedSum": "Weighted Sum",
+"none": "none",
+"addDifference": "Add Difference"
 },
 "parameters": {
 "general": "General",
@@ -15,7 +15,7 @@
 "langSpanish": "Español",
 "nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
 "postProcessing": "Post-procesamiento",
-"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador",
+"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador.",
 "postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
 "postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
 "training": "Entrenamiento",
@@ -44,7 +44,26 @@
 "statusUpscaling": "Aumentando Tamaño",
 "statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
 "statusLoadingModel": "Cargando Modelo",
-"statusModelChanged": "Modelo cambiado"
+"statusModelChanged": "Modelo cambiado",
+"statusMergedModels": "Modelos combinados",
+"githubLabel": "Github",
+"discordLabel": "Discord",
+"langEnglish": "Inglés",
+"langDutch": "Holandés",
+"langFrench": "Francés",
+"langGerman": "Alemán",
+"langItalian": "Italiano",
+"langArabic": "Árabe",
+"langJapanese": "Japones",
+"langPolish": "Polaco",
+"langBrPortuguese": "Portugués brasileño",
+"langRussian": "Ruso",
+"langSimplifiedChinese": "Chino simplificado",
+"langUkranian": "Ucraniano",
+"back": "Atrás",
+"statusConvertingModel": "Convertir el modelo",
+"statusModelConverted": "Modelo adaptado",
+"statusMergingModels": "Fusionar modelos"
 },
 "gallery": {
 "generations": "Generaciones",
@@ -284,16 +303,16 @@
 "nameValidationMsg": "Introduce un nombre para tu modelo",
 "description": "Descripción",
 "descriptionValidationMsg": "Introduce una descripción para tu modelo",
-"config": "Config",
+"config": "Configurar",
-"configValidationMsg": "Ruta del archivo de configuración del modelo",
+"configValidationMsg": "Ruta del archivo de configuración del modelo.",
 "modelLocation": "Ubicación del Modelo",
-"modelLocationValidationMsg": "Ruta del archivo de modelo",
+"modelLocationValidationMsg": "Ruta del archivo de modelo.",
 "vaeLocation": "Ubicación VAE",
-"vaeLocationValidationMsg": "Ruta del archivo VAE",
+"vaeLocationValidationMsg": "Ruta del archivo VAE.",
 "width": "Ancho",
-"widthValidationMsg": "Ancho predeterminado de tu modelo",
+"widthValidationMsg": "Ancho predeterminado de tu modelo.",
 "height": "Alto",
-"heightValidationMsg": "Alto predeterminado de tu modelo",
+"heightValidationMsg": "Alto predeterminado de tu modelo.",
 "addModel": "Añadir Modelo",
 "updateModel": "Actualizar Modelo",
 "availableModels": "Modelos disponibles",
@@ -320,7 +339,61 @@
 "deleteModel": "Eliminar Modelo",
 "deleteConfig": "Eliminar Configuración",
 "deleteMsg1": "¿Estás seguro de querer eliminar esta entrada de modelo de InvokeAI?",
-"deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas."
+"deleteMsg2": "El checkpoint del modelo no se eliminará de tu disco. Puedes volver a añadirlo si lo deseas.",
+"safetensorModels": "SafeTensors",
+"addDiffuserModel": "Añadir difusores",
+"inpainting": "v1 Repintado",
+"repoIDValidationMsg": "Repositorio en línea de tu modelo",
+"checkpointModels": "Puntos de control",
+"convertToDiffusersHelpText4": "Este proceso se realiza una sola vez. Puede tardar entre 30 y 60 segundos dependiendo de las especificaciones de tu ordenador.",
+"diffusersModels": "Difusores",
+"addCheckpointModel": "Agregar modelo de punto de control/Modelo Safetensor",
+"vaeRepoID": "Identificador del repositorio de VAE",
+"vaeRepoIDValidationMsg": "Repositorio en línea de tú VAE",
+"formMessageDiffusersModelLocation": "Difusores Modelo Ubicación",
+"formMessageDiffusersModelLocationDesc": "Por favor, introduzca al menos uno.",
+"formMessageDiffusersVAELocation": "Ubicación VAE",
+"formMessageDiffusersVAELocationDesc": "Si no se proporciona, InvokeAI buscará el archivo VAE dentro de la ubicación del modelo indicada anteriormente.",
+"convert": "Convertir",
+"convertToDiffusers": "Convertir en difusores",
+"convertToDiffusersHelpText1": "Este modelo se convertirá al formato 🧨 Difusores.",
+"convertToDiffusersHelpText2": "Este proceso sustituirá su entrada del Gestor de Modelos por la versión de Difusores del mismo modelo.",
+"convertToDiffusersHelpText3": "Su archivo de puntos de control en el disco NO será borrado ni modificado de ninguna manera. Puede volver a añadir su punto de control al Gestor de Modelos si lo desea.",
+"convertToDiffusersHelpText5": "Asegúrese de que dispone de suficiente espacio en disco. Los modelos suelen variar entre 4 GB y 7 GB de tamaño.",
+"convertToDiffusersHelpText6": "¿Desea transformar este modelo?",
+"convertToDiffusersSaveLocation": "Guardar ubicación",
+"v1": "v1",
+"v2": "v2",
+"statusConverting": "Adaptar",
+"modelConverted": "Modelo adaptado",
+"sameFolder": "La misma carpeta",
+"invokeRoot": "Carpeta InvokeAI",
+"custom": "Personalizado",
+"customSaveLocation": "Ubicación personalizada para guardar",
+"merge": "Fusión",
+"modelsMerged": "Modelos fusionados",
+"mergeModels": "Combinar modelos",
+"modelOne": "Modelo 1",
+"modelTwo": "Modelo 2",
+"modelThree": "Modelo 3",
+"mergedModelName": "Nombre del modelo combinado",
+"alpha": "Alfa",
+"interpolationType": "Tipo de interpolación",
+"mergedModelSaveLocation": "Guardar ubicación",
+"mergedModelCustomSaveLocation": "Ruta personalizada",
+"invokeAIFolder": "Invocar carpeta de la inteligencia artificial",
+"modelMergeHeaderHelp2": "Sólo se pueden fusionar difusores. Si desea fusionar un modelo de punto de control, conviértalo primero en difusores.",
+"modelMergeAlphaHelp": "Alfa controla la fuerza de mezcla de los modelos. Los valores alfa más bajos reducen la influencia del segundo modelo.",
+"modelMergeInterpAddDifferenceHelp": "En este modo, el Modelo 3 se sustrae primero del Modelo 2. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente. La versión resultante se mezcla con el Modelo 1 con la tasa alfa establecida anteriormente.",
+"ignoreMismatch": "Ignorar discrepancias entre modelos seleccionados",
+"modelMergeHeaderHelp1": "Puede combinar hasta tres modelos diferentes para crear una mezcla que se adapte a sus necesidades.",
+"inverseSigmoid": "Sigmoideo inverso",
+"weightedSum": "Modelo de suma ponderada",
+"sigmoid": "Función sigmoide",
+"allModels": "Todos los modelos",
+"repo_id": "Identificador del repositorio",
+"pathToCustomConfig": "Ruta a la configuración personalizada",
+"customConfig": "Configuración personalizada"
 },
 "parameters": {
 "images": "Imágenes",
@@ -380,7 +453,22 @@
 "info": "Información",
 "deleteImage": "Eliminar Imagen",
 "initialImage": "Imagen Inicial",
-"showOptionsPanel": "Mostrar panel de opciones"
+"showOptionsPanel": "Mostrar panel de opciones",
+"symmetry": "Simetría",
+"vSymmetryStep": "Paso de simetría V",
+"hSymmetryStep": "Paso de simetría H",
+"cancel": {
+"immediate": "Cancelar inmediatamente",
+"schedule": "Cancelar tras la iteración actual",
+"isScheduled": "Cancelando",
+"setType": "Tipo de cancelación"
+},
+"copyImage": "Copiar la imagen",
+"general": "General",
+"negativePrompts": "Preguntas negativas",
+"imageToImage": "Imagen a imagen",
+"denoisingStrength": "Intensidad de la eliminación del ruido",
+"hiresStrength": "Alta resistencia"
 },
 "settings": {
 "models": "Modelos",
@@ -393,7 +481,8 @@
 "resetWebUI": "Restablecer interfaz web",
 "resetWebUIDesc1": "Al restablecer la interfaz web, solo se restablece la caché local del navegador de sus imágenes y la configuración guardada. No se elimina ninguna imagen de su disco duro.",
 "resetWebUIDesc2": "Si las imágenes no se muestran en la galería o algo más no funciona, intente restablecer antes de reportar un incidente en GitHub.",
-"resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla."
+"resetComplete": "La interfaz web se ha restablecido. Actualice la página para recargarla.",
+"useSlidersForAll": "Utilice controles deslizantes para todas las opciones"
 },
 "toast": {
 "tempFoldersEmptied": "Directorio temporal vaciado",
@@ -431,12 +520,12 @@
 "feature": {
 "prompt": "Este campo tomará todo el texto de entrada, incluidos tanto los términos de contenido como los estilísticos. Si bien se pueden incluir pesos en la solicitud, los comandos/parámetros estándar de línea de comandos no funcionarán.",
 "gallery": "Conforme se generan nuevas invocaciones, los archivos del directorio de salida se mostrarán aquí. Las generaciones tienen opciones adicionales para configurar nuevas generaciones.",
-"other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. El modo sin costuras funciona para generar patrones repetitivos en la salida. La optimización de alta resolución realiza un ciclo de generación de dos pasos y debe usarse en resoluciones más altas cuando desee una imagen/composición más coherente.",
+"other": "Estas opciones habilitarán modos de procesamiento alternativos para Invoke. 'Seamless mosaico' creará patrones repetitivos en la salida. 'Alta resolución' es la generación en dos pasos con img2img: use esta configuración cuando desee una imagen más grande y más coherente sin artefactos. tomar más tiempo de lo habitual txt2img.",
 "seed": "Los valores de semilla proporcionan un conjunto inicial de ruido que guían el proceso de eliminación de ruido y se pueden aleatorizar o rellenar con una semilla de una invocación anterior. La función Umbral se puede usar para mitigar resultados indeseables a valores CFG más altos (intente entre 0-10), y Perlin se puede usar para agregar ruido Perlin al proceso de eliminación de ruido. Ambos sirven para agregar variación a sus salidas.",
 "variations": "Pruebe una variación con una cantidad entre 0 y 1 para cambiar la imagen de salida para la semilla establecida. Se encuentran variaciones interesantes en la semilla entre 0.1 y 0.3.",
 "upscale": "Usando ESRGAN, puede aumentar la resolución de salida sin requerir un ancho/alto más alto en la generación inicial.",
 "faceCorrection": "Usando GFPGAN o Codeformer, la corrección de rostros intentará identificar rostros en las salidas y corregir cualquier defecto/anormalidad. Los valores de fuerza más altos aplicarán una presión correctiva más fuerte en las salidas, lo que resultará en rostros más atractivos. Con Codeformer, una mayor fidelidad intentará preservar la imagen original, a expensas de la fuerza de corrección de rostros.",
-"imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75.",
+"imageToImage": "Imagen a Imagen permite cargar una imagen inicial, que InvokeAI usará para guiar el proceso de generación, junto con una solicitud. Un valor más bajo para esta configuración se parecerá más a la imagen original. Se aceptan valores entre 0-1, y se recomienda un rango de .25-.75",
 "boundingBox": "La caja delimitadora es análoga a las configuraciones de Ancho y Alto para Texto a Imagen o Imagen a Imagen. Solo se procesará el área en la caja.",
 "seamCorrection": "Controla el manejo de parches visibles que pueden ocurrir cuando se pega una imagen generada de nuevo en el lienzo.",
 "infillAndScaling": "Administra los métodos de relleno (utilizados en áreas enmascaradas o borradas del lienzo) y la escala (útil para tamaños de caja delimitadora pequeños)."
@@ -44,7 +44,26 @@
 "statusUpscaling": "Redimensinando",
 "statusUpscalingESRGAN": "Redimensinando (ESRGAN)",
 "statusLoadingModel": "Carregando Modelo",
-"statusModelChanged": "Modelo Alterado"
+"statusModelChanged": "Modelo Alterado",
+"githubLabel": "Github",
+"discordLabel": "Discord",
+"langArabic": "Árabe",
+"langEnglish": "Inglês",
+"langDutch": "Holandês",
+"langFrench": "Francês",
+"langGerman": "Alemão",
+"langItalian": "Italiano",
+"langJapanese": "Japonês",
+"langPolish": "Polonês",
+"langSimplifiedChinese": "Chinês",
+"langUkranian": "Ucraniano",
+"back": "Voltar",
+"statusConvertingModel": "Convertendo Modelo",
+"statusModelConverted": "Modelo Convertido",
+"statusMergingModels": "Mesclando Modelos",
+"statusMergedModels": "Modelos Mesclados",
+"langRussian": "Russo",
+"langSpanish": "Espanhol"
 },
 "gallery": {
 "generations": "Gerações",
@@ -237,7 +256,7 @@
 "desc": "Salva a tela atual na galeria"
 },
 "copyToClipboard": {
-"title": "Copiar Para a Área de Transferência ",
+"title": "Copiar para a Área de Transferência",
 "desc": "Copia a tela atual para a área de transferência"
 },
 "downloadImage": {
@@ -284,7 +303,7 @@
 "nameValidationMsg": "Insira um nome para o seu modelo",
 "description": "Descrição",
 "descriptionValidationMsg": "Adicione uma descrição para o seu modelo",
-"config": "Config",
+"config": "Configuração",
 "configValidationMsg": "Caminho para o arquivo de configuração do seu modelo.",
 "modelLocation": "Localização do modelo",
 "modelLocationValidationMsg": "Caminho para onde seu modelo está localizado.",
@@ -317,7 +336,52 @@
 "deleteModel": "Excluir modelo",
 "deleteConfig": "Excluir Config",
 "deleteMsg1": "Tem certeza de que deseja excluir esta entrada do modelo de InvokeAI?",
-"deleteMsg2": "Isso não vai excluir o arquivo de modelo checkpoint do seu disco. Você pode lê-los, se desejar."
+"deleteMsg2": "Isso não vai excluir o arquivo de modelo checkpoint do seu disco. Você pode lê-los, se desejar.",
+"checkpointModels": "Checkpoints",
+"diffusersModels": "Diffusers",
+"safetensorModels": "SafeTensors",
+"addCheckpointModel": "Adicionar Modelo de Checkpoint/Safetensor",
+"addDiffuserModel": "Adicionar Diffusers",
+"repo_id": "Repo ID",
+"vaeRepoID": "VAE Repo ID",
+"vaeRepoIDValidationMsg": "Repositório Online do seu VAE",
+"scanAgain": "Digitalize Novamente",
+"selectAndAdd": "Selecione e Adicione Modelos Listados Abaixo",
+"noModelsFound": "Nenhum Modelo Encontrado",
+"formMessageDiffusersModelLocation": "Localização dos Modelos Diffusers",
+"formMessageDiffusersModelLocationDesc": "Por favor entre com ao menos um.",
+"formMessageDiffusersVAELocation": "Localização do VAE",
+"formMessageDiffusersVAELocationDesc": "Se não provido, InvokeAI irá procurar pelo arquivo VAE dentro do local do modelo.",
+"convertToDiffusers": "Converter para Diffusers",
+"convertToDiffusersHelpText1": "Este modelo será convertido para o formato 🧨 Diffusers.",
+"convertToDiffusersHelpText5": "Por favor, certifique-se de que você tenha espaço suficiente em disco. Os modelos geralmente variam entre 4GB e 7GB de tamanho.",
+"convertToDiffusersHelpText6": "Você deseja converter este modelo?",
+"convertToDiffusersSaveLocation": "Local para Salvar",
+"v1": "v1",
+"v2": "v2",
+"inpainting": "v1 Inpainting",
+"customConfig": "Configuração personalizada",
+"pathToCustomConfig": "Caminho para configuração personalizada",
+"convertToDiffusersHelpText3": "Seu arquivo de ponto de verificação no disco NÃO será excluído ou modificado de forma alguma. Você pode adicionar seu ponto de verificação ao Gerenciador de modelos novamente, se desejar.",
+"convertToDiffusersHelpText4": "Este é um processo único. Pode levar cerca de 30 a 60s, dependendo das especificações do seu computador.",
+"merge": "Mesclar",
+"modelsMerged": "Modelos mesclados",
+"mergeModels": "Mesclar modelos",
+"modelOne": "Modelo 1",
+"modelTwo": "Modelo 2",
+"modelThree": "Modelo 3",
+"statusConverting": "Convertendo",
+"modelConverted": "Modelo Convertido",
+"sameFolder": "Mesma pasta",
+"invokeRoot": "Pasta do InvokeAI",
+"custom": "Personalizado",
+"customSaveLocation": "Local de salvamento personalizado",
+"mergedModelName": "Nome do modelo mesclado",
+"alpha": "Alpha",
+"allModels": "Todos os Modelos",
+"repoIDValidationMsg": "Repositório Online do seu Modelo",
+"convert": "Converter",
+"convertToDiffusersHelpText2": "Este processo irá substituir sua entrada de Gerenciador de Modelos por uma versão Diffusers do mesmo modelo."
 },
 "parameters": {
 "images": "Imagems",
@@ -442,14 +506,14 @@
 "move": "Mover",
 "resetView": "Resetar Visualização",
 "mergeVisible": "Fundir Visível",
-"saveToGallery": "Save To Gallery",
+"saveToGallery": "Salvar na Galeria",
 "copyToClipboard": "Copiar para a Área de Transferência",
 "downloadAsImage": "Baixar Como Imagem",
 "undo": "Desfazer",
 "redo": "Refazer",
 "clearCanvas": "Limpar Tela",
 "canvasSettings": "Configurações de Tela",
-"showIntermediates": "Show Intermediates",
+"showIntermediates": "Mostrar Intermediários",
 "showGrid": "Mostrar Grade",
 "snapToGrid": "Encaixar na Grade",
 "darkenOutsideSelection": "Escurecer Seleção Externa",
invokeai/frontend/public/locales/ro.json (new file, 1 line)
@@ -0,0 +1 @@
+{}
@@ -392,7 +392,7 @@ const makeSocketIOListeners = (
 addLogEntry({
 timestamp: dateFormat(new Date(), 'isoDateTime'),
 message: `${i18n.t(
-'modelmanager:modelAdded'
+'modelManager.modelAdded'
 )}: ${deleted_model_name}`,
 level: 'info',
 })
@@ -400,7 +400,7 @@ const makeSocketIOListeners = (
 dispatch(
 addToast({
 title: `${i18n.t(
-'modelmanager:modelEntryDeleted'
+'modelManager.modelEntryDeleted'
 )}: ${deleted_model_name}`,
 status: 'success',
 duration: 2500,
@@ -424,7 +424,7 @@ const makeSocketIOListeners = (
 dispatch(
 addToast({
 title: `${i18n.t(
-'modelmanager:modelConverted'
+'modelManager.modelConverted'
 )}: ${converted_model_name}`,
 status: 'success',
 duration: 2500,
@@ -144,8 +144,8 @@ export const frontendToBackendParameters = (
 variationAmount,
 width,
 shouldUseSymmetry,
-horizontalSymmetryTimePercentage,
-verticalSymmetryTimePercentage,
+horizontalSymmetrySteps,
+verticalSymmetrySteps,
 } = generationState;

 const {
@@ -185,17 +185,17 @@ export const frontendToBackendParameters = (

 // Symmetry Settings
 if (shouldUseSymmetry) {
-if (horizontalSymmetryTimePercentage > 0) {
+if (horizontalSymmetrySteps > 0) {
 generationParameters.h_symmetry_time_pct = Math.max(
 0,
-Math.min(1, horizontalSymmetryTimePercentage / steps)
+Math.min(1, horizontalSymmetrySteps / steps)
 );
 }

-if (horizontalSymmetryTimePercentage > 0) {
+if (verticalSymmetrySteps > 0) {
 generationParameters.v_symmetry_time_pct = Math.max(
 0,
-Math.min(1, verticalSymmetryTimePercentage / steps)
+Math.min(1, verticalSymmetrySteps / steps)
 );
 }
 }
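This hunk is more than a rename: the UI state changes from a raw time percentage to a step count (`horizontalSymmetrySteps`/`verticalSymmetrySteps`), which is converted back to the 0-1 fraction the backend's `h_symmetry_time_pct`/`v_symmetry_time_pct` parameters expect, and the second branch now correctly tests the vertical value instead of the horizontal one. A standalone sketch of the conversion; the helper name is hypothetical, the arithmetic is taken from the diff:

```ts
// Hypothetical helper showing the steps -> fraction conversion used above.
const symmetryStepsToPct = (symmetrySteps: number, steps: number): number =>
  Math.max(0, Math.min(1, symmetrySteps / steps));

console.log(symmetryStepsToPct(15, 50)); // 0.3: symmetry applied over the first 15 of 50 steps
console.log(symmetryStepsToPct(80, 50)); // 1: clamped when the step count exceeds total steps
```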
@@ -109,7 +109,7 @@ const IAICanvasStatusText = () => {
 color: boundingBoxColor,
 }}
 >{`${t(
-'unifiedcanvas:boundingBox'
+'unifiedCanvas.boundingBox'
 )}: ${boundingBoxDimensionsString}`}</div>
 )}
 {shouldShowScaledBoundingBox && (
@@ -118,19 +118,19 @@ const IAICanvasStatusText = () => {
 color: boundingBoxColor,
 }}
 >{`${t(
-'unifiedcanvas:scaledBoundingBox'
+'unifiedCanvas.scaledBoundingBox'
 )}: ${scaledBoundingBoxDimensionsString}`}</div>
 )}
 {shouldShowCanvasDebugInfo && (
 <>
 <div>{`${t(
-'unifiedcanvas:boundingBoxPosition'
+'unifiedCanvas.boundingBoxPosition'
 )}: ${boundingBoxCoordinatesString}`}</div>
 <div>{`${t(
-'unifiedcanvas:canvasDimensions'
+'unifiedCanvas.canvasDimensions'
 )}: ${canvasDimensionsString}`}</div>
 <div>{`${t(
-'unifiedcanvas:canvasPosition'
+'unifiedCanvas.canvasPosition'
 )}: ${canvasCoordinatesString}`}</div>
 <IAICanvasStatusTextCursorPos />
 </>
@@ -34,7 +34,7 @@ export default function IAICanvasStatusTextCursorPos() {

 return (
 <div>{`${t(
-'unifiedcanvas:cursorPosition'
+'unifiedCanvas.cursorPosition'
 )}: ${cursorCoordinatesString}`}</div>
 );
 }
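The string-key rewrites in the hunks above all follow one pattern: lookups move from i18next's namespace form (`modelmanager:modelAdded`, `unifiedcanvas:boundingBox`) to dot-separated paths under a camelCased section (`modelManager.modelAdded`, `unifiedCanvas.boundingBox`). A minimal sketch of the new lookup shape, assuming i18next's default `keySeparator` of `'.'` and a single default namespace; the resource tree here is a stand-in for the locale files in this diff:

```ts
import i18n from 'i18next';

// Stand-in resources mirroring the nested layout the locale JSON now uses.
i18n
  .init({
    lng: 'en',
    resources: {
      en: {
        translation: {
          modelManager: { modelAdded: 'Model Added' },
          unifiedCanvas: { boundingBox: 'Bounding Box' },
        },
      },
    },
  })
  .then(() => {
    // Dot paths walk the nested object; the old 'ns:key' form instead
    // selected a separate flat namespace per feature.
    console.log(i18n.t('modelManager.modelAdded')); // "Model Added"
    console.log(i18n.t('unifiedCanvas.boundingBox')); // "Bounding Box"
  });
```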
@@ -21,6 +21,7 @@ import {
 setInitialImage,
 setSeed,
 } from 'features/parameters/store/generationSlice';
+import { setAllPostProcessingParameters } from 'features/parameters/store/postprocessingSlice';
 import { postprocessingSelector } from 'features/parameters/store/postprocessingSelectors';
 import { systemSelector } from 'features/system/store/systemSelectors';
 import { SystemState } from 'features/system/store/systemSlice';
@@ -189,11 +190,12 @@ const CurrentImageButtons = () => {
 );

 const handleClickUseAllParameters = () => {
-if (!currentImage) return;
-currentImage.metadata && dispatch(setAllParameters(currentImage.metadata));
-if (currentImage.metadata?.image.type === 'img2img') {
+if (!currentImage?.metadata) return;
+dispatch(setAllParameters(currentImage.metadata));
+dispatch(setAllPostProcessingParameters(currentImage.metadata));
+if (currentImage.metadata.image.type === 'img2img') {
 dispatch(setActiveTab('img2img'));
-} else if (currentImage.metadata?.image.type === 'txt2img') {
+} else if (currentImage.metadata.image.type === 'txt2img') {
 dispatch(setActiveTab('txt2img'));
 }
 };
@@ -10,6 +10,7 @@ import {
 setInitialImage,
 setSeed,
 } from 'features/parameters/store/generationSlice';
+import { setAllPostProcessingParameters } from 'features/parameters/store/postprocessingSlice';
 import { DragEvent, memo, useState } from 'react';
 import { FaCheck, FaTrashAlt } from 'react-icons/fa';
 import DeleteImageModal from './DeleteImageModal';
@@ -114,7 +115,10 @@ const HoverableImage = memo((props: HoverableImageProps) => {
 };

 const handleUseAllParameters = () => {
-metadata && dispatch(setAllParameters(metadata));
+if (metadata) {
+dispatch(setAllParameters(metadata));
+dispatch(setAllPostProcessingParameters(metadata));
+}
 toast({
 title: t('toast.parametersSet'),
 status: 'success',
@@ -2,18 +2,18 @@ import { RootState } from 'app/store';
 import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAISlider from 'common/components/IAISlider';
 import {
-setHorizontalSymmetryTimePercentage,
-setVerticalSymmetryTimePercentage,
+setHorizontalSymmetrySteps,
+setVerticalSymmetrySteps,
 } from 'features/parameters/store/generationSlice';
 import { useTranslation } from 'react-i18next';

 export default function SymmetrySettings() {
-const horizontalSymmetryTimePercentage = useAppSelector(
-(state: RootState) => state.generation.horizontalSymmetryTimePercentage
+const horizontalSymmetrySteps = useAppSelector(
+(state: RootState) => state.generation.horizontalSymmetrySteps
 );

-const verticalSymmetryTimePercentage = useAppSelector(
-(state: RootState) => state.generation.verticalSymmetryTimePercentage
+const verticalSymmetrySteps = useAppSelector(
+(state: RootState) => state.generation.verticalSymmetrySteps
 );

 const steps = useAppSelector((state: RootState) => state.generation.steps);
@@ -26,28 +26,28 @@ export default function SymmetrySettings() {
 <>
 <IAISlider
 label={t('parameters.hSymmetryStep')}
-value={horizontalSymmetryTimePercentage}
-onChange={(v) => dispatch(setHorizontalSymmetryTimePercentage(v))}
+value={horizontalSymmetrySteps}
+onChange={(v) => dispatch(setHorizontalSymmetrySteps(v))}
 min={0}
 max={steps}
 step={1}
 withInput
 withSliderMarks
 withReset
-handleReset={() => dispatch(setHorizontalSymmetryTimePercentage(0))}
+handleReset={() => dispatch(setHorizontalSymmetrySteps(0))}
 sliderMarkRightOffset={-6}
 ></IAISlider>
 <IAISlider
 label={t('parameters.vSymmetryStep')}
-value={verticalSymmetryTimePercentage}
-onChange={(v) => dispatch(setVerticalSymmetryTimePercentage(v))}
+value={verticalSymmetrySteps}
+onChange={(v) => dispatch(setVerticalSymmetrySteps(v))}
 min={0}
 max={steps}
 step={1}
 withInput
 withSliderMarks
 withReset
-handleReset={() => dispatch(setVerticalSymmetryTimePercentage(0))}
+handleReset={() => dispatch(setVerticalSymmetrySteps(0))}
 sliderMarkRightOffset={-6}
 ></IAISlider>
 </>
@@ -3,7 +3,10 @@ import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAINumberInput from 'common/components/IAINumberInput';

 import IAISlider from 'common/components/IAISlider';
-import { setSteps } from 'features/parameters/store/generationSlice';
+import {
+clampSymmetrySteps,
+setSteps,
+} from 'features/parameters/store/generationSlice';
 import { useTranslation } from 'react-i18next';

 export default function MainSteps() {
@@ -14,7 +17,13 @@ export default function MainSteps() {
 );
 const { t } = useTranslation();

-const handleChangeSteps = (v: number) => dispatch(setSteps(v));
+const handleChangeSteps = (v: number) => {
+dispatch(setSteps(v));
+};
+
+const handleBlur = () => {
+dispatch(clampSymmetrySteps());
+};

 return shouldUseSliders ? (
 <IAISlider
@@ -41,6 +50,7 @@ export default function MainSteps() {
 width="auto"
 styleClass="main-settings-block"
 textAlign="center"
+onBlur={handleBlur}
 />
 );
 }
@@ -5,6 +5,7 @@ import IAIButton, { IAIButtonProps } from 'common/components/IAIButton';
 import IAIIconButton, {
 IAIIconButtonProps,
 } from 'common/components/IAIIconButton';
+import { clampSymmetrySteps } from 'features/parameters/store/generationSlice';
 import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
 import { useHotkeys } from 'react-hotkeys-hook';
 import { useTranslation } from 'react-i18next';
@@ -30,6 +31,7 @@ export default function InvokeButton(props: InvokeButton) {
 useHotkeys(
 ['ctrl+enter', 'meta+enter'],
 () => {
+dispatch(clampSymmetrySteps());
 dispatch(generateImage(activeTabName));
 },
 {
@@ -4,6 +4,7 @@ import * as InvokeAI from 'app/invokeai';
 import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
 import promptToString from 'common/util/promptToString';
 import { seedWeightsToString } from 'common/util/seedWeightPairs';
+import { clamp } from 'lodash';

 export interface GenerationState {
 cfgScale: number;
@@ -33,8 +34,8 @@ export interface GenerationState {
 variationAmount: number;
 width: number;
 shouldUseSymmetry: boolean;
-horizontalSymmetryTimePercentage: number;
-verticalSymmetryTimePercentage: number;
+horizontalSymmetrySteps: number;
+verticalSymmetrySteps: number;
 }

 const initialGenerationState: GenerationState = {
@@ -64,8 +65,8 @@ const initialGenerationState: GenerationState = {
 variationAmount: 0.1,
 width: 512,
 shouldUseSymmetry: false,
-horizontalSymmetryTimePercentage: 0,
-verticalSymmetryTimePercentage: 0,
+horizontalSymmetrySteps: 0,
+verticalSymmetrySteps: 0,
 };

 const initialState: GenerationState = initialGenerationState;
@@ -99,6 +100,18 @@ export const generationSlice = createSlice({
 setSteps: (state, action: PayloadAction<number>) => {
 state.steps = action.payload;
 },
+clampSymmetrySteps: (state) => {
+state.horizontalSymmetrySteps = clamp(
+state.horizontalSymmetrySteps,
+0,
+state.steps
+);
+state.verticalSymmetrySteps = clamp(
+state.verticalSymmetrySteps,
+0,
+state.steps
+);
+},
 setCfgScale: (state, action: PayloadAction<number>) => {
 state.cfgScale = action.payload;
 },
@@ -288,7 +301,6 @@ export const generationSlice = createSlice({
 state.perlin = perlin;
 }
 if (typeof seamless === 'boolean') state.seamless = seamless;
-// if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix; // TODO: Needs to be fixed after reorg
 if (width) state.width = width;
 if (height) state.height = height;

@@ -334,22 +346,17 @@ export const generationSlice = createSlice({
 setShouldUseSymmetry: (state, action: PayloadAction<boolean>) => {
 state.shouldUseSymmetry = action.payload;
 },
-setHorizontalSymmetryTimePercentage: (
-state,
-action: PayloadAction<number>
-) => {
-state.horizontalSymmetryTimePercentage = action.payload;
+setHorizontalSymmetrySteps: (state, action: PayloadAction<number>) => {
+state.horizontalSymmetrySteps = action.payload;
 },
-setVerticalSymmetryTimePercentage: (
-state,
-action: PayloadAction<number>
-) => {
-state.verticalSymmetryTimePercentage = action.payload;
+setVerticalSymmetrySteps: (state, action: PayloadAction<number>) => {
+state.verticalSymmetrySteps = action.payload;
 },
 },
 });

 export const {
+clampSymmetrySteps,
 clearInitialImage,
 resetParametersState,
 resetSeed,
@@ -384,8 +391,8 @@ export const {
 setVariationAmount,
 setWidth,
 setShouldUseSymmetry,
-setHorizontalSymmetryTimePercentage,
-setVerticalSymmetryTimePercentage,
+setHorizontalSymmetrySteps,
+setVerticalSymmetrySteps,
 } = generationSlice.actions;

 export default generationSlice.reducer;
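The new `clampSymmetrySteps` reducer relies on lodash's `clamp(value, lower, upper)` to keep both symmetry step counts inside `[0, steps]`; the hunks above dispatch it on blur of the steps input and just before invoking, so lowering the step count cannot leave a stale, larger symmetry step behind. A quick sketch of the clamping behavior this depends on:

```ts
import { clamp } from 'lodash';

// clamp(value, lower, upper) bounds value to the inclusive range [lower, upper].
console.log(clamp(75, 0, 50)); // 50 -> symmetry steps pulled down to the new total
console.log(clamp(-5, 0, 50)); // 0
console.log(clamp(20, 0, 50)); // 20 (already in range, unchanged)
```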
@@ -1,5 +1,6 @@
 import type { PayloadAction } from '@reduxjs/toolkit';
 import { createSlice } from '@reduxjs/toolkit';
+import * as InvokeAI from 'app/invokeai';
 import { FACETOOL_TYPES } from 'app/constants';

 export type UpscalingLevel = 2 | 4;
@@ -40,6 +41,17 @@ export const postprocessingSlice = createSlice({
 name: 'postprocessing',
 initialState,
 reducers: {
+setAllPostProcessingParameters: (
+state,
+action: PayloadAction<InvokeAI.Metadata>
+) => {
+const { type, hires_fix } = action.payload.image;
+
+if (type === 'txt2img') {
+state.hiresFix = Boolean(hires_fix);
+// Strength of img2img used in hires_fix is not currently exposed in the Metadata for the final image.
+}
+},
 setFacetoolStrength: (state, action: PayloadAction<number>) => {
 state.facetoolStrength = action.payload;
 },
@@ -83,6 +95,7 @@ export const postprocessingSlice = createSlice({
 });

 export const {
+setAllPostProcessingParameters,
 resetPostprocessingState,
 setCodeformerFidelity,
 setFacetoolStrength,
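Together with the `CurrentImageButtons` and `HoverableImage` changes above, this reducer lets "use all parameters" restore post-processing state from image metadata, not just generation parameters. A self-contained sketch of the reducer's behavior; the `Metadata` type here is trimmed to the single field the reducer reads, whereas the real `InvokeAI.Metadata` carries much more:

```ts
import { configureStore, createSlice, PayloadAction } from '@reduxjs/toolkit';

// Trimmed-down metadata: only the field setAllPostProcessingParameters uses.
type Metadata = { image: { type: string; hires_fix?: boolean } };

const postprocessing = createSlice({
  name: 'postprocessing',
  initialState: { hiresFix: false },
  reducers: {
    setAllPostProcessingParameters: (state, action: PayloadAction<Metadata>) => {
      const { type, hires_fix } = action.payload.image;
      if (type === 'txt2img') {
        // hires_fix is only meaningful for txt2img generations.
        state.hiresFix = Boolean(hires_fix);
      }
    },
  },
});

const store = configureStore({ reducer: postprocessing.reducer });
store.dispatch(
  postprocessing.actions.setAllPostProcessingParameters({
    image: { type: 'txt2img', hires_fix: true },
  })
);
console.log(store.getState().hiresFix); // true
```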
@@ -57,19 +57,19 @@ export default function MergeModels() {

 const [modelMergeForce, setModelMergeForce] = useState<boolean>(false);

-const modelOneList = Object.keys(diffusersModels).filter((model) => {
-if (model !== modelTwo && model !== modelThree) return model;
-});
+const modelOneList = Object.keys(diffusersModels).filter(
+(model) => model !== modelTwo && model !== modelThree
+);

-const modelTwoList = Object.keys(diffusersModels).filter((model) => {
-if (model !== modelOne && model !== modelThree) return model;
-});
+const modelTwoList = Object.keys(diffusersModels).filter(
+(model) => model !== modelOne && model !== modelThree
+);

 const modelThreeList = [
-'none',
-...Object.keys(diffusersModels).filter((model) => {
-if (model !== modelOne && model !== modelTwo) return model;
-}),
+{ key: t('modelManager.none'), value: 'none' },
+...Object.keys(diffusersModels)
+.filter((model) => model !== modelOne && model !== modelTwo)
+.map((model) => ({ key: model, value: model })),
 ];

 const isProcessing = useAppSelector(
@@ -209,18 +209,22 @@ export default function MergeModels() {
 <Flex columnGap={4}>
 {modelThree === 'none' ? (
 <>
-<Radio value="weighted_sum">weighted_sum</Radio>
-<Radio value="sigmoid">sigmoid</Radio>
-<Radio value="inv_sigmoid">inv_sigmoid</Radio>
+<Radio value="weighted_sum">
+{t('modelManager.weightedSum')}
+</Radio>
+<Radio value="sigmoid">{t('modelManager.sigmoid')}</Radio>
+<Radio value="inv_sigmoid">
+{t('modelManager.inverseSigmoid')}
+</Radio>
 </>
 ) : (
 <Radio value="add_difference">
 <Tooltip
 label={t(
-'modelmanager:modelMergeInterpAddDifferenceHelp'
+'modelManager.modelMergeInterpAddDifferenceHelp'
 )}
 >
-add_difference
+{t('modelManager.addDifference')}
 </Tooltip>
 </Radio>
 )}
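Two things happen in the list refactor above: the filter callbacks become boolean predicates (the old `if (cond) return model;` returned the string or `undefined`, which filtered correctly only because non-empty strings are truthy), and the model-three entries become `{ key, value }` pairs so the "none" option can carry a localized label. A standalone sketch with stand-in model names:

```ts
// Stand-in model map; real entries come from the diffusers model registry.
const diffusersModels: Record<string, unknown> = {
  'stable-diffusion-1.5': {},
  'analog-diffusion-1.0': {},
  'dreamlike-diffusion-1.0': {},
};
const modelOne = 'stable-diffusion-1.5';
const modelTwo = 'analog-diffusion-1.0';

const modelThreeList = [
  { key: 'none', value: 'none' }, // key is t('modelManager.none') in the component
  ...Object.keys(diffusersModels)
    .filter((model) => model !== modelOne && model !== modelTwo)
    .map((model) => ({ key: model, value: model })),
];

console.log(modelThreeList);
// [ { key: 'none', value: 'none' },
//   { key: 'dreamlike-diffusion-1.0', value: 'dreamlike-diffusion-1.0' } ]
```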
@@ -181,7 +181,8 @@ export default function SearchModels() {

 const configFiles = {
 v1: 'configs/stable-diffusion/v1-inference.yaml',
-v2: 'configs/stable-diffusion/v2-inference-v.yaml',
+v2_base: 'configs/stable-diffusion/v2-inference-v.yaml',
+v2_768: 'configs/stable-diffusion/v2-inference-v.yaml',
 inpainting: 'configs/stable-diffusion/v1-inpainting-inference.yaml',
 custom: pathToConfig,
 };
@@ -385,7 +386,8 @@ export default function SearchModels() {
 >
 <Flex gap={4}>
 <Radio value="v1">{t('modelManager.v1')}</Radio>
-<Radio value="v2">{t('modelManager.v2')}</Radio>
+<Radio value="v2_base">{t('modelManager.v2_base')}</Radio>
+<Radio value="v2_768">{t('modelManager.v2_768')}</Radio>
 <Radio value="inpainting">
 {t('modelManager.inpainting')}
 </Radio>
@@ -18,6 +18,7 @@ import { setParametersPanelScrollPosition } from 'features/ui/store/uiSlice';
 import InvokeAILogo from 'assets/images/logo.png';
 import { isEqual } from 'lodash';
 import { uiSelector } from '../store/uiSelectors';
+import { useTranslation } from 'react-i18next';

 type Props = { children: ReactNode };

@@ -60,6 +61,8 @@ const InvokeOptionsPanel = (props: Props) => {

 const { children } = props;

+const { t } = useTranslation();
+
 // Hotkeys
 useHotkeys(
 'o',
@@ -176,7 +179,7 @@ const InvokeOptionsPanel = (props: Props) => {
 }
 }}
 >
-<Tooltip label="Pin Options Panel">
+<Tooltip label={t('common.pinOptionsPanel')}>
 <div
 className="parameters-panel-pin-button"
 data-selected={shouldPinParametersPanel}
invokeai/frontend/src/i18.d.ts (vendored, 5 changes)
@@ -1,11 +1,16 @@
 import 'i18next';

+import en from '../public/locales/en.json';
+
 declare module 'i18next' {
   // Extend CustomTypeOptions
   interface CustomTypeOptions {
     // Setting Default Namespace As English
     defaultNS: 'en';
     // Custom Types For Resources
+    resources: {
+      en: typeof en;
+    };
     // Never Return Null
     returnNull: false;
   }
 }

(File diff suppressed because one or more lines are too long)
@@ -200,6 +200,8 @@ class Generate:
         # it wasn't actually doing anything. This logic could be reinstated.
         self.device = torch.device(choose_torch_device())
         print(f">> Using device_type {self.device.type}")
+        if self.device.type == 'cuda':
+            print(f">> CUDA device '{torch.cuda.get_device_name(torch.cuda.current_device())}' (GPU {os.environ.get('CUDA_VISIBLE_DEVICES') or 0})")
         if full_precision:
             if self.precision != "auto":
                 raise ValueError("Remove --full_precision / -F if using --precision")
@@ -22,7 +22,7 @@ from ..generate import Generate
 from .args import (Args, dream_cmd_from_png, metadata_dumps,
                    metadata_from_png)
 from .generator.diffusers_pipeline import PipelineIntermediateState
-from .globals import Globals
+from .globals import Globals, global_config_dir
 from .image_util import make_grid
 from .log import write_log
 from .model_manager import ModelManager

@@ -33,7 +33,6 @@ from ..util import url_attachment_name
 # global used in multiple functions (fix)
 infile = None
-

 def main():
     """Initialize command-line parsers and the diffusion model"""
     global infile

@@ -66,6 +65,9 @@ def main():
     Globals.sequential_guidance = args.sequential_guidance
     Globals.ckpt_convert = args.ckpt_convert

+    # run any post-install patches needed
+    run_patches()
+
     print(f">> Internet connectivity is {Globals.internet_available}")

     if not args.conf:

@@ -156,10 +158,16 @@ def main():
     except Exception as e:
         report_model_error(opt, e)

+    # completer is the readline object
+    completer = get_completer(opt, models=gen.model_manager.list_models())
+
     # try to autoconvert new models
     if path := opt.autoimport:
         gen.model_manager.heuristic_import(
-            str(path), convert=False, commit_to_conf=opt.conf
+            str(path),
+            convert=False,
+            commit_to_conf=opt.conf,
+            config_file_callback=lambda x: _pick_configuration_file(completer,x),
         )

     if path := opt.autoconvert:

@@ -178,7 +186,7 @@ def main():
     )

     try:
-        main_loop(gen, opt)
+        main_loop(gen, opt, completer)
     except KeyboardInterrupt:
         print(
             f'\nGoodbye!\nYou can start InvokeAI again by running the "invoke.bat" (or "invoke.sh") script from {Globals.root}'

@@ -189,7 +197,7 @@ def main():


 # TODO: main_loop() has gotten busy. Needs to be refactored.
-def main_loop(gen, opt):
+def main_loop(gen, opt, completer):
     """prompt/read/execute loop"""
     global infile
     done = False

@@ -200,7 +208,6 @@ def main_loop(gen, opt):
     # The readline completer reads history from the .dream_history file located in the
     # output directory specified at the time of script launch. We do not currently support
     # changing the history file midstream when the output directory is changed.
-    completer = get_completer(opt, models=gen.model_manager.list_models())
     set_default_output_dir(opt, completer)
     if gen.model:
         add_embedding_terms(gen, completer)

@@ -389,6 +396,7 @@ def main_loop(gen, opt):
                         prior_variations,
                         postprocessed,
                         first_seed,
+                        gen.model_name,
                     )
                     path = file_writer.save_image_and_prompt_to_png(
                         image=image,

@@ -402,6 +410,7 @@ def main_loop(gen, opt):
                             else first_seed
                         ],
                         model_hash=gen.model_hash,
+                        model_id=gen.model_name,
                     ),
                     name=filename,
                     compress_level=opt.png_compression,

@@ -657,10 +666,10 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
         model_name=model_name,
         description=model_desc,
         convert=convert,
+        config_file_callback=lambda x: _pick_configuration_file(completer,x),
     )

     if not imported_name:
-        print("** Import failed or was skipped")
+        print("** Aborting import.")
         return

     if not _verify_load(imported_name, gen):

@@ -674,6 +683,48 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
     completer.update_models(gen.model_manager.list_models())
     print(f">> {imported_name} successfully installed")

+def _pick_configuration_file(completer, checkpoint_path: Path)->Path:
+    print(
+f"""
+Please select the type of the model at checkpoint {checkpoint_path}:
+[1] A Stable Diffusion v1.x ckpt/safetensors model
+[2] A Stable Diffusion v1.x inpainting ckpt/safetensors model
+[3] A Stable Diffusion v2.x base model (512 pixels; there should be no 'parameterization:' line in its yaml file)
+[4] A Stable Diffusion v2.x v-predictive model (768 pixels; look for a 'parameterization: "v"' line in its yaml file)
+[5] Other (you will be prompted to enter the config file path)
+[Q] I have no idea! Skip the import.
+""")
+    choices = [
+        global_config_dir() / 'stable-diffusion' / x
+        for x in [
+            'v1-inference.yaml',
+            'v1-inpainting-inference.yaml',
+            'v2-inference.yaml',
+            'v2-inference-v.yaml',
+        ]
+    ]
+
+    ok = False
+    while not ok:
+        try:
+            choice = input('select 0-5, Q > ').strip()
+            if choice.startswith(('q','Q')):
+                return
+            if choice == '5':
+                completer.complete_extensions(('.yaml'))
+                choice = Path(input('Select config file for this model> ').strip()).absolute()
+                completer.complete_extensions(None)
+                ok = choice.exists()
+            else:
+                choice = choices[int(choice)-1]
+                ok = True
+        except (ValueError, IndexError):
+            print(f'{choice} is not a valid choice')
+        except EOFError:
+            return
+    return choice
+
+
 def _verify_load(model_name: str, gen) -> bool:
     print(">> Verifying that new model loads...")
     current_model = gen.model_name
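A note on the `config_file_callback` keyword threaded through `heuristic_import()` above: the callback is only a fallback for when automatic probing cannot identify a legacy checkpoint's type. The following is a minimal sketch of that contract, not the real implementation; `probe_config_for` is a hypothetical stand-in for `ModelManager.probe_model_type`, and its filename heuristic is invented purely to keep the sketch self-contained.

from pathlib import Path
from typing import Callable, Optional

def probe_config_for(checkpoint: Path) -> Optional[Path]:
    # Hypothetical probe: the real code inspects the state dict; this
    # filename check exists only so the sketch runs on its own.
    if "inpaint" in checkpoint.name:
        return Path("configs/stable-diffusion/v1-inpainting-inference.yaml")
    return None  # ambiguous -- caller must ask the user

def resolve_config(
    checkpoint: Path,
    config_file_callback: Optional[Callable[[Path], Optional[Path]]] = None,
) -> Optional[Path]:
    config = probe_config_for(checkpoint)
    if config is None and config_file_callback is not None:
        # This is the hook the CLI wires up with
        # lambda x: _pick_configuration_file(completer, x)
        config = config_file_callback(checkpoint)
    return config  # None means: skip the import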
@@ -744,8 +795,8 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
     except KeyboardInterrupt:
         return

     manager.commit(opt.conf)
-    if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
+    if ckpt_path and click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
         ckpt_path.unlink(missing_ok=True)
         print(f"{ckpt_path} deleted")

@@ -941,13 +992,14 @@ def add_postprocessing_to_metadata(opt, original_file, new_file, tool, command):


 def prepare_image_metadata(
     opt,
     prefix,
     seed,
     operation="generate",
     prior_variations=[],
     postprocessed=False,
     first_seed=None,
+    model_id='unknown',
 ):
     if postprocessed and opt.save_original:
         filename = choose_postprocess_name(opt, prefix, seed)

@@ -955,6 +1007,7 @@ def prepare_image_metadata(
         wildcards = dict(opt.__dict__)
         wildcards["prefix"] = prefix
         wildcards["seed"] = seed
+        wildcards["model_id"] = model_id
         try:
             filename = opt.fnformat.format(**wildcards)
         except KeyError as e:

@@ -972,18 +1025,17 @@ def prepare_image_metadata(
         first_seed = first_seed or seed
         this_variation = [[seed, opt.variation_amount]]
         opt.with_variations = prior_variations + this_variation
-        formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed)
+        formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed,model_id=model_id)
     elif len(prior_variations) > 0:
-        formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed)
+        formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed,model_id=model_id)
     elif operation == "postprocess":
         formatted_dream_prompt = "!fix " + opt.dream_prompt_str(
-            seed=seed, prompt=opt.input_file_path
+            seed=seed, prompt=opt.input_file_path, model_id=model_id,
         )
     else:
-        formatted_dream_prompt = opt.dream_prompt_str(seed=seed)
+        formatted_dream_prompt = opt.dream_prompt_str(seed=seed,model_id=model_id)
     return filename, formatted_dream_prompt


 def choose_postprocess_name(opt, prefix, seed) -> str:
     match = re.search("postprocess:(\w+)", opt.last_operation)
     if match:

@@ -1234,6 +1286,20 @@ def check_internet() -> bool:
     except:
         return False

+# This routine performs any patch-ups needed after installation
+def run_patches():
+    # install ckpt configuration files that may have been added to the
+    # distro after original root directory configuration
+    import invokeai.configs as conf
+    from shutil import copyfile
+
+    root_configs = Path(global_config_dir(), 'stable-diffusion')
+    repo_configs = Path(conf.__path__[0], 'stable-diffusion')
+    for src in repo_configs.iterdir():
+        dest = root_configs / src.name
+        if not dest.exists():
+            copyfile(src,dest)
+
 if __name__ == '__main__':
     main()
@@ -1 +1,2 @@
-__version__='2.3.1'
+__version__='2.3.3-rc1'
@@ -333,7 +333,7 @@ class Args(object):
             switches.append(f'-V {formatted_variations}')
         if 'variations' in a and len(a['variations'])>0:
             switches.append(f'-V {a["variations"]}')
-        return ' '.join(switches)
+        return ' '.join(switches) + f' # model_id={kwargs.get("model_id","unknown model")}'

     def __getattribute__(self,name):
         '''

@@ -878,7 +878,7 @@ class Args(object):
         )
         render_group.add_argument(
             '--fnformat',
-            default='{prefix}.{seed}.png',
+            default=None,
             type=str,
             help='Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png',
         )

@@ -1155,6 +1155,7 @@ def format_metadata(**kwargs):
 def metadata_dumps(opt,
                    seeds=[],
                    model_hash=None,
+                   model_id=None,
                    postprocessing=None):
     '''
     Given an Args object, returns a dict containing the keys and

@@ -1167,7 +1168,7 @@ def metadata_dumps(opt,
     # top-level metadata minus `image` or `images`
     metadata = {
         'model'       : 'stable diffusion',
-        'model_id'    : opt.model,
+        'model_id'    : model_id or opt.model,
         'model_hash'  : model_hash,
         'app_id'      : ldm.invoke.__app_id__,
         'app_version' : ldm.invoke.__version__,

@@ -1180,7 +1181,7 @@ def metadata_dumps(opt,
     )

     # remove any image keys not mentioned in RFC #266
-    rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps',
+    rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps','hires_fix',
                          'cfg_scale','threshold','perlin','step_number','width','height','extra','strength','seamless'
                          'init_img','init_mask','facetool','facetool_strength','upscale','h_symmetry_time_pct',
                          'v_symmetry_time_pct']
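To see what the `dream_prompt_str` change above produces, here is a small illustration; the switch list and model name are invented for the example.

# Illustration only: the switches and model name below are made up.
switches = ['"banana sushi"', "-S50", "-s30", "-C7.5"]
kwargs = {"model_id": "stable-diffusion-1.5"}
line = " ".join(switches) + f' # model_id={kwargs.get("model_id","unknown model")}'
print(line)
# "banana sushi" -S50 -s30 -C7.5 # model_id=stable-diffusion-1.5

Because the appended text starts with `#`, a re-parsed prompt line treats the model id as a comment, so old prompt files remain compatible.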
@@ -327,10 +327,10 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False
     unet_key = "model.diffusion_model."
     # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
     if sum(k.startswith("model_ema") for k in keys) > 100:
         print(f"  | Checkpoint {path} has both EMA and non-EMA weights.")
         if extract_ema:
             print(
                 '  | Extracting EMA weights (usually better for inference)'
             )
             for key in keys:
                 if key.startswith("model.diffusion_model"):

@@ -338,7 +338,7 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False
                     unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
         else:
             print(
                 '  | Extracting only the non-EMA weights (usually better for fine-tuning)'
             )

     for key in keys:

@@ -809,6 +809,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     vae:AutoencoderKL=None,
     precision:torch.dtype=torch.float32,
     return_generator_pipeline:bool=False,
+    scan_needed:bool=True,
 )->Union[StableDiffusionPipeline,StableDiffusionGeneratorPipeline]:
     '''
     Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`

@@ -843,7 +844,12 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     verbosity = dlogging.get_verbosity()
     dlogging.set_verbosity_error()

-    checkpoint = load_file(checkpoint_path) if Path(checkpoint_path).suffix == '.safetensors' else torch.load(checkpoint_path)
+    if Path(checkpoint_path).suffix == '.ckpt':
+        if scan_needed:
+            ModelManager.scan_model(checkpoint_path,checkpoint_path)
+        checkpoint = torch.load(checkpoint_path)
+    else:
+        checkpoint = load_file(checkpoint_path)
     cache_dir = global_cache_dir('hub')
     pipeline_class = StableDiffusionGeneratorPipeline if return_generator_pipeline else StableDiffusionPipeline

@@ -851,7 +857,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     if "global_step" in checkpoint:
         global_step = checkpoint["global_step"]
     else:
         print("  | global_step key not found in model")
         global_step = None

     # sometimes there is a state_dict key and sometimes not

@@ -862,12 +868,16 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     if original_config_file is None:
         model_type = ModelManager.probe_model_type(checkpoint)

-        if model_type == SDLegacyType.V2:
+        if model_type == SDLegacyType.V2_v:
             original_config_file = global_config_dir() / 'stable-diffusion' / 'v2-inference-v.yaml'
             if global_step == 110000:
                 # v2.1 needs to upcast attention
                 upcast_attention = True
+        elif model_type == SDLegacyType.V2_e:
+            original_config_file = (
+                global_config_dir() / "stable-diffusion" / "v2-inference.yaml"
+            )
         elif model_type == SDLegacyType.V1_INPAINT:
             original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inpainting-inference.yaml'

@@ -949,14 +959,14 @@ def load_pipeline_from_original_stable_diffusion_ckpt(

     # Convert the VAE model, or use the one passed
     if not vae:
         print('  | Using checkpoint model\'s original VAE')
         vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
         converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)

         vae = AutoencoderKL(**vae_config)
         vae.load_state_dict(converted_vae_checkpoint)
     else:
-        print('  | Using external VAE specified in config')
+        print('  | Using VAE specified in config')

     # Convert the text model.
     model_type = pipeline_type

@@ -1002,7 +1012,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         tokenizer=tokenizer,
         unet=unet.to(precision),
         scheduler=scheduler,
-        safety_checker=safety_checker.to(precision),
+        safety_checker=None if return_generator_pipeline else safety_checker.to(precision),
         feature_extractor=feature_extractor,
     )
 else:
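The `scan_needed` branch above applies the same scan-before-`torch.load` guard used elsewhere in this diff. A minimal sketch of the pattern follows, assuming only the picklescan and safetensors APIs already used in this codebase; the function name is invented.

# Scan legacy pickle checkpoints before torch.load(); .safetensors files
# skip the scan because they cannot embed executable pickle code.
from pathlib import Path
import torch
from picklescan.scanner import scan_file_path
from safetensors.torch import load_file

def load_checkpoint_safely(path: str) -> dict:
    p = Path(path)
    if p.suffix == ".safetensors":
        return load_file(p)
    result = scan_file_path(str(p))  # look for malicious pickle opcodes
    if result.infected_files != 0:
        raise RuntimeError(f"{p} failed the malware scan; refusing to load")
    return torch.load(p, map_location="cpu")  # only load after a clean scan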
@@ -9,7 +9,7 @@ get_uc_and_c_and_ec() get the conditioned and unconditioned latent, an
 import re
 from typing import Union, Optional, Any

-from transformers import CLIPTokenizer, CLIPTextModel
+from transformers import CLIPTokenizer

 from compel import Compel
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser

@@ -52,6 +52,8 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
                     textual_inversion_manager=model.textual_inversion_manager,
                     dtype_for_device_getter=torch_dtype)

+    # get rid of any newline characters
+    prompt_string = prompt_string.replace("\n", " ")
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
     positive_prompt: FlattenedPrompt|Blend

@@ -113,7 +115,7 @@ def get_tokens_for_prompt_object(tokenizer, parsed_prompt: FlattenedPrompt, trun
     return tokens


-def split_prompt_to_positive_and_negative(prompt_string_uncleaned):
+def split_prompt_to_positive_and_negative(prompt_string_uncleaned: str):
     unconditioned_words = ''
     unconditional_regex = r'\[(.*?)\]'
     unconditionals = re.findall(unconditional_regex, prompt_string_uncleaned)
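For readers unfamiliar with the bracket syntax that `split_prompt_to_positive_and_negative` handles, here is a short worked example of what the regex above extracts; the prompt text is invented.

import re

unconditional_regex = r"\[(.*?)\]"
prompt = "a castle on a hill [blurry, low quality] at sunset"

negatives = re.findall(unconditional_regex, prompt)
positive = re.sub(unconditional_regex, "", prompt)

print(negatives)  # ['blurry, low quality']
print(positive)   # 'a castle on a hill  at sunset' (the real code presumably tidies the leftover whitespace)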
@@ -290,7 +290,7 @@ def download_vaes():
     # first the diffusers version
     repo_id = "stabilityai/sd-vae-ft-mse"
     args = dict(
-        cache_dir=global_cache_dir("diffusers"),
+        cache_dir=global_cache_dir("hub"),
     )
     if not AutoencoderKL.from_pretrained(repo_id, **args):
         raise Exception(f"download of {repo_id} failed")

@@ -712,8 +712,8 @@ def write_opts(opts: Namespace, init_file: Path):
             out_file.write(line + "\n")
         out_file.write(
             f"""
---outdir={opts.outdir}
---embedding_path={opts.embedding_path}
+--outdir="{opts.outdir}"
+--embedding_path="{opts.embedding_path}"
 --precision={opts.precision}
 --max_loaded_models={int(opts.max_loaded_models)}
 --{'no-' if not opts.safety_checker else ''}nsfw_checker
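The quoting added to `--outdir` and `--embedding_path` matters for paths containing spaces; a quick stdlib illustration (the path is invented):

import shlex

outdir = "C:/Users/Jane Doe/invokeai/outputs"
unquoted = f"--outdir={outdir}"
quoted = f'--outdir="{outdir}"'

print(shlex.split(unquoted))  # ['--outdir=C:/Users/Jane', 'Doe/invokeai/outputs'] -- broken in two
print(shlex.split(quoted))    # ['--outdir=C:/Users/Jane Doe/invokeai/outputs'] -- survives intact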
@@ -72,7 +72,7 @@ def main():
         tag = Prompt.ask('Enter an InvokeAI tag or branch name')

     print(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]')
-    cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517'
+    cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517 --upgrade'
     print('')
     print('')
     if os.system(cmd)==0:
@@ -109,6 +109,7 @@ def install_requested_models(
                 model_manager.heuristic_import(
                     path_url_or_repo,
                     convert=convert_to_diffusers,
+                    config_file_callback=_pick_configuration_file,
                     commit_to_conf=config_file_path
                 )
             except KeyboardInterrupt:

@@ -126,7 +127,7 @@ def install_requested_models(
         while line := input.readline():
             if not line.startswith(argument):
                 output.writelines([line])
-        output.writelines([f'{argument} {directory}'])
+        output.writelines([f'{argument} "{directory}"'])
     os.replace(replacement,initfile)

 # -------------------------------------

@@ -138,6 +139,45 @@ def yes_or_no(prompt: str, default_yes=True):
     else:
         return response[0] in ("y", "Y")

+# -------------------------------------
+def _pick_configuration_file(checkpoint_path: Path)->Path:
+    print(
+"""
+Please select the type of this model:
+[1] A Stable Diffusion v1.x ckpt/safetensors model
+[2] A Stable Diffusion v1.x inpainting ckpt/safetensors model
+[3] A Stable Diffusion v2.x base model (512 pixels; no 'parameterization:' in its yaml file)
+[4] A Stable Diffusion v2.x v-predictive model (768 pixels; look for 'parameterization: "v"' in its yaml file)
+[5] Other (you will be prompted to enter the config file path)
+[Q] I have no idea! Skip the import.
+""")
+    choices = [
+        global_config_dir() / 'stable-diffusion' / x
+        for x in [
+            'v1-inference.yaml',
+            'v1-inpainting-inference.yaml',
+            'v2-inference.yaml',
+            'v2-inference-v.yaml',
+        ]
+    ]
+
+    ok = False
+    while not ok:
+        try:
+            choice = input('select 0-5, Q > ').strip()
+            if choice.startswith(('q','Q')):
+                return
+            if choice == '5':
+                choice = Path(input('Select config file for this model> ').strip()).absolute()
+                ok = choice.exists()
+            else:
+                choice = choices[int(choice)-1]
+                ok = True
+        except (ValueError, IndexError):
+            print(f'{choice} is not a valid choice')
+        except EOFError:
+            return
+    return choice
+
 # -------------------------------------
 def get_root(root: str = None) -> str:

@@ -262,7 +302,6 @@ def _download_diffusion_weights(
         path = download_from_hf(
             model_class,
             repo_id,
-            cache_subdir="diffusers",
             safety_checker=None,
             **extra_args,
         )
ldm/invoke/dynamic_prompts.py (new executable file, 535 lines)
@@ -0,0 +1,535 @@
#!/usr/bin/env python

"""
Simple script to generate a file of InvokeAI prompts and settings
that scan across steps and other parameters.
"""

import argparse
import io
import json
import os
import pydoc
import re
import shutil
import sys
import time
from contextlib import redirect_stderr
from io import TextIOBase
from itertools import product
from multiprocessing import Process
from multiprocessing.connection import Connection, Pipe
from pathlib import Path
from tempfile import gettempdir
from typing import Callable, Iterable, List

import numpy as np
import yaml
from omegaconf import OmegaConf, dictconfig, listconfig


def expand_prompts(
    template_file: Path,
    run_invoke: bool = False,
    invoke_model: str = None,
    invoke_outdir: Path = None,
    processes_per_gpu: int = 1,
):
    """
    :param template_file: A YAML file containing templated prompts and args
    :param run_invoke: A boolean which if True will pass expanded prompts to invokeai CLI
    :param invoke_model: Name of the model to load when run_invoke is true; otherwise uses default
    :param invoke_outdir: Directory for outputs when run_invoke is true; otherwise uses default
    """
    if template_file.name.endswith(".json"):
        with open(template_file, "r") as file:
            with io.StringIO(yaml.dump(json.load(file))) as fh:
                conf = OmegaConf.load(fh)
    else:
        conf = OmegaConf.load(template_file)

    # loading here to avoid long wait for help message
    import torch

    torch.multiprocessing.set_start_method("spawn")
    gpu_count = torch.cuda.device_count() if torch.cuda.is_available() else 1
    commands = expanded_invokeai_commands(conf, run_invoke)
    children = list()

    try:
        if run_invoke:
            invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
            if invoke_model:
                invokeai_args.extend(("--model", invoke_model))
            if invoke_outdir:
                outdir = os.path.expanduser(invoke_outdir)
                invokeai_args.extend(("--outdir", outdir))
            else:
                outdir = gettempdir()
            logdir = Path(outdir, "invokeai-batch-logs")

            processes_to_launch = gpu_count * processes_per_gpu
            print(
                f">> Spawning {processes_to_launch} invokeai processes across {gpu_count} CUDA gpus",
                file=sys.stderr,
            )
            print(
                f'>> Outputs will be written into {invoke_outdir or "default InvokeAI outputs directory"}, and error logs will be written to {logdir}',
                file=sys.stderr,
            )
            import ldm.invoke.CLI

            parent_conn, child_conn = Pipe()
            children = set()
            for i in range(processes_to_launch):
                p = Process(
                    target=_run_invoke,
                    kwargs=dict(
                        entry_point=ldm.invoke.CLI.main,
                        conn_in=child_conn,
                        conn_out=parent_conn,
                        args=invokeai_args,
                        gpu=i % gpu_count,
                        logdir=logdir,
                    ),
                )
                p.start()
                children.add(p)
            child_conn.close()
            sequence = 0
            for command in commands:
                sequence += 1
                parent_conn.send(
                    command + f' --fnformat="dp.{sequence:04}.{{prompt}}.png"'
                )
            parent_conn.close()
        else:
            for command in commands:
                print(command)
    except KeyboardInterrupt:
        for p in children:
            p.terminate()


class MessageToStdin(object):
    def __init__(self, connection: Connection):
        self.connection = connection
        self.linebuffer = list()

    def readline(self) -> str:
        try:
            if len(self.linebuffer) == 0:
                message = self.connection.recv()
                self.linebuffer = message.split("\n")
            result = self.linebuffer.pop(0)
            return result
        except EOFError:
            return None


class FilterStream(object):
    def __init__(
        self, stream: TextIOBase, include: re.Pattern = None, exclude: re.Pattern = None
    ):
        self.stream = stream
        self.include = include
        self.exclude = exclude

    def write(self, data: str):
        if self.include and self.include.match(data):
            self.stream.write(data)
            self.stream.flush()
        elif self.exclude and not self.exclude.match(data):
            self.stream.write(data)
            self.stream.flush()

    def flush(self):
        self.stream.flush()


def _run_invoke(
    entry_point: Callable,
    conn_in: Connection,
    conn_out: Connection,
    args: List[str],
    logdir: Path,
    gpu: int = 0,
):
    pid = os.getpid()
    logdir.mkdir(parents=True, exist_ok=True)
    logfile = Path(logdir, f'{time.strftime("%Y-%m-%d_%H-%M-%S")}-pid={pid}.txt')
    print(
        f">> Process {pid} running on GPU {gpu}; logging to {logfile}", file=sys.stderr
    )
    conn_out.close()
    os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
    sys.argv = args
    sys.stdin = MessageToStdin(conn_in)
    sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
    with open(logfile, "w") as stderr, redirect_stderr(stderr):
        entry_point()


def _filter_output(stream: TextIOBase):
    while line := stream.readline():
        if re.match("^\[\d+\]", line):
            print(line)


def main():
    parser = argparse.ArgumentParser(
        description=HELP,
    )
    parser.add_argument(
        "template_file",
        type=Path,
        nargs="?",
        help="path to a template file, use --example to generate an example file",
    )
    parser.add_argument(
        "--example",
        action="store_true",
        default=False,
        help=f'Print an example template file in YAML format. Use "{sys.argv[0]} --example > example.yaml" to save output to a file',
    )
    parser.add_argument(
        "--json-example",
        action="store_true",
        default=False,
        help=f'Print an example template file in json format. Use "{sys.argv[0]} --json-example > example.json" to save output to a file',
    )
    parser.add_argument(
        "--instructions",
        "-i",
        dest="instructions",
        action="store_true",
        default=False,
        help="Print verbose instructions.",
    )
    parser.add_argument(
        "--invoke",
        action="store_true",
        help="Execute invokeai using specified optional --model, --processes_per_gpu and --outdir",
    )
    parser.add_argument(
        "--model",
        help="Feed the generated prompts to the invokeai CLI using the indicated model. Will be overridden by a model: section in template file.",
    )
    parser.add_argument(
        "--outdir", type=Path, help="Write images and log into indicated directory"
    )
    parser.add_argument(
        "--processes_per_gpu",
        type=int,
        default=1,
        help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
    )
    opt = parser.parse_args()

    if opt.example:
        print(EXAMPLE_TEMPLATE_FILE)
        sys.exit(0)

    if opt.json_example:
        print(_yaml_to_json(EXAMPLE_TEMPLATE_FILE))
        sys.exit(0)

    if opt.instructions:
        pydoc.pager(INSTRUCTIONS)
        sys.exit(0)

    if not opt.template_file:
        parser.print_help()
        sys.exit(-1)

    expand_prompts(
        template_file=opt.template_file,
        run_invoke=opt.invoke,
        invoke_model=opt.model,
        invoke_outdir=opt.outdir,
        processes_per_gpu=opt.processes_per_gpu,
    )


def expanded_invokeai_commands(
    conf: OmegaConf, always_switch_models: bool = False
) -> List[List[str]]:
    models = expand_values(conf.get("model"))
    steps = expand_values(conf.get("steps")) or [30]
    cfgs = expand_values(conf.get("cfg")) or [7.5]
    samplers = expand_values(conf.get("sampler")) or ["ddim"]
    seeds = expand_values(conf.get("seed")) or [0]
    dimensions = expand_values(conf.get("dimensions")) or ["512x512"]
    init_img = expand_values(conf.get("init_img")) or [""]
    perlin = expand_values(conf.get("perlin")) or [0]
    threshold = expand_values(conf.get("threshold")) or [0]
    strength = expand_values(conf.get("strength")) or [0.75]
    prompts = expand_prompt(conf.get("prompt")) or ["banana sushi"]

    cross_product = product(
        *[
            models,
            seeds,
            prompts,
            samplers,
            cfgs,
            steps,
            perlin,
            threshold,
            init_img,
            strength,
            dimensions,
        ]
    )
    previous_model = None

    result = list()
    for p in cross_product:
        (
            model,
            seed,
            prompt,
            sampler,
            cfg,
            step,
            perlin,
            threshold,
            init_img,
            strength,
            dimensions,
        ) = tuple(p)
        (width, height) = dimensions.split("x")
        switch_args = (
            f"!switch {model}\n"
            if always_switch_models or previous_model != model
            else ""
        )
        image_args = f"-I{init_img} -f{strength}" if init_img else ""
        command = f"{switch_args}{prompt} -S{seed} -A{sampler} -C{cfg} -s{step} {image_args} --perlin={perlin} --threshold={threshold} -W{width} -H{height}"
        result.append(command)
        previous_model = model
    return result


def expand_prompt(
    stanza: str | dict | listconfig.ListConfig | dictconfig.DictConfig,
) -> list | range:
    if not stanza:
        return None
    if isinstance(stanza, listconfig.ListConfig):
        return stanza
    if isinstance(stanza, str):
        return [stanza]
    if not isinstance(stanza, dictconfig.DictConfig):
        raise ValueError(f"Unrecognized template: {stanza}")

    if not (template := stanza.get("template")):
        raise KeyError('"prompt" section must contain a "template" definition')

    fragment_labels = re.findall("{([^{}]+?)}", template)
    if len(fragment_labels) == 0:
        return [template]
    fragments = [[{x: y} for y in stanza.get(x)] for x in fragment_labels]
    dicts = merge(product(*fragments))
    return [template.format(**x) for x in dicts]


def merge(dicts: Iterable) -> List[dict]:
    result = list()
    for x in dicts:
        to_merge = dict()
        for item in x:
            to_merge = to_merge | item
        result.append(to_merge)
    return result


def expand_values(stanza: str | dict | listconfig.ListConfig) -> list | range:
    if not stanza:
        return None
    if isinstance(stanza, listconfig.ListConfig):
        return stanza
    elif match := re.match("^(-?\d+);(-?\d+)(;(\d+))?", str(stanza)):
        (start, stop, step) = (
            int(match.group(1)),
            int(match.group(2)),
            int(match.group(4)) or 1,
        )
        return range(start, stop + step, step)
    elif match := re.match("^(-?[\d.]+);(-?[\d.]+)(;([\d.]+))?", str(stanza)):
        (start, stop, step) = (
            float(match.group(1)),
            float(match.group(2)),
            float(match.group(4)) or 1.0,
        )
        return np.arange(start, stop + step, step).tolist()
    else:
        return [stanza]


def _yaml_to_json(yaml_input: str) -> str:
    """
    Converts a yaml string into a json string. Used internally
    to generate the example template file.
    """
    with io.StringIO(yaml_input) as yaml_in:
        data = yaml.safe_load(yaml_in)
    return json.dumps(data, indent=2)


HELP = """
This script takes a prompt template file that contains multiple
alternative values for the prompt and its generation arguments (such
as steps). It then expands out the prompts using all combinations of
arguments and either prints them to the terminal's standard output, or
feeds the prompts directly to the invokeai command-line interface.

Call this script again with --instructions (-i) for verbose instructions.
"""

INSTRUCTIONS = f"""
== INTRODUCTION ==
This script takes a prompt template file that contains multiple
alternative values for the prompt and its generation arguments (such
as steps). It then expands out the prompts using all combinations of
arguments and either prints them to the terminal's standard output, or
feeds the prompts directly to the invokeai command-line interface.

If the optional --invoke argument is provided, then the generated
prompts will be fed directly to invokeai for image generation. You
will likely want to add the --outdir option in order to save the image
files to their own folder.

   {sys.argv[0]} --invoke --outdir=/tmp/outputs my_template.yaml

If --invoke isn't specified, the expanded prompts will be printed to
output. You can capture them to a file for inspection and editing this
way:

   {sys.argv[0]} my_template.yaml > prompts.txt

And then feed them to invokeai this way:

   invokeai --outdir=/tmp/outputs < prompts.txt

Note that after invokeai finishes processing the list of prompts, the
output directory will contain a markdown file named `log.md`
containing annotated images. You can open this file using an e-book
reader such as the cross-platform Calibre eBook reader
(https://calibre-ebook.com/).

== FORMAT OF THE TEMPLATES FILE ==

This will generate an example template file that you can get
started with:

   {sys.argv[0]} --example > example.yaml

An excerpt from the top of this file looks like this:

 model:
   - stable-diffusion-1.5
   - stable-diffusion-2.1-base
 steps: 30;50;1  # start steps at 30 and go up to 50, incrementing by 1 each time
 seed: 50        # fixed constant, seed=50
 cfg:            # list of CFG values to try
   - 7
   - 8
   - 12
 prompt: a walk in the park  # constant value

In more detail, the template file can have one or more of the
following sections:
 - model:
 - steps:
 - seed:
 - cfg:
 - sampler:
 - prompt:
 - init_img:
 - perlin:
 - threshold:
 - strength

- Each section can have a constant value such as this:
     steps: 50
- Or a range of numeric values in the format:
     steps: <start>;<stop>;<step>  (note semicolon, not colon!)
- Or a list of values in the format:
     - value1
     - value2
     - value3

The "prompt:" section is special. It can accept a constant value:

   prompt: a walk in the woods in the style of donatello

Or it can accept a list of prompts:

   prompt:
      - a walk in the woods
      - a walk on the beach

Or it can accept a templated list of prompts. These allow you to
define a series of phrases, each of which is a list. You then combine
them together into a prompt template in this way:

   prompt:
      style:
           - oil painting
           - watercolor
           - comic book
           - studio photography
      subject:
           - sunny meadow in the mountains
           - gathering storm in the mountains
      template: a {{subject}} in the style of {{style}}

In the example above, the phrase names "style" and "subject" are
examples only. You can use whatever you like. However, the "template:"
field is required. The output will be:

   "a sunny meadow in the mountains in the style of an oil painting"
   "a sunny meadow in the mountains in the style of watercolor masterpiece"
   ...
   "a gathering storm in the mountains in the style of an ink sketch"

== SUPPORT FOR JSON FORMAT ==

For those who prefer the JSON format, this script will accept JSON
template files as well. Please run "{sys.argv[0]} --json-example"
to print out a version of the example template file in json format.
You may save it to disk and use it as a starting point for your own
template this way:

   {sys.argv[0]} --json-example > template.json
"""

EXAMPLE_TEMPLATE_FILE = """
model: stable-diffusion-1.5
steps: 30;50;10
seed: 50
dimensions: 512x512
perlin: 0.0
threshold: 0
cfg:
  - 7
  - 12
sampler:
  - k_euler_a
  - k_lms
prompt:
  style:
       - oil painting
       - watercolor
  location:
       - the mountains
       - a desert
  object:
       - luxurious dwelling
       - crude tent
  template: a {object} in {location}, in the style of {style}
"""

if __name__ == "__main__":
    main()
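A hedged usage sketch for the expansion helpers in the new file, run against an inline stanza rather than a template file; it assumes the module's `expand_values` and `expand_prompt` are importable as written above.

from omegaconf import OmegaConf
from ldm.invoke.dynamic_prompts import expand_values, expand_prompt

conf = OmegaConf.create({
    "steps": "30;50;10",  # semicolon range syntax: start;stop;step
    "prompt": {
        "style": ["oil painting", "watercolor"],
        "subject": ["sunny meadow", "gathering storm"],
        "template": "a {subject} in the style of {style}",
    },
})

print(list(expand_values(conf.get("steps"))))  # [30, 40, 50]
print(expand_prompt(conf.get("prompt")))
# ['a sunny meadow in the style of oil painting',
#  'a sunny meadow in the style of watercolor',
#  'a gathering storm in the style of oil painting',
#  'a gathering storm in the style of watercolor']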
@@ -88,16 +88,13 @@ def global_cache_dir(subdir:Union[str,Path]='')->Path:
     '''
     Returns Path to the model cache directory. If a subdirectory
     is provided, it will be appended to the end of the path, allowing
-    for huggingface-style conventions:
-         global_cache_dir('diffusers')
+    for Hugging Face-style conventions. Currently, Hugging Face has
+    moved all models into the "hub" subfolder, so for any pretrained
+    HF model, use:
          global_cache_dir('hub')
-    Current HuggingFace documentation (mid-Jan 2023) indicates that
-    transformers models will be cached into a "transformers" subdirectory,
-    but in practice they seem to go into "hub". But if needed:
-         global_cache_dir('transformers')
-    One other caveat is that HuggingFace is moving some diffusers models
-    into the "hub" subdirectory as well, so this will need to be revisited
-    from time to time.
+
+    The legacy location for transformers used to be global_cache_dir('transformers')
+    and global_cache_dir('diffusers') for diffusers.
     '''
     home: str = os.getenv('HF_HOME')
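A brief illustration of the updated convention; the fallback path below is invented for the example, and the real base comes from HF_HOME or the InvokeAI root.

import os
from pathlib import Path

# Sketch of the resolution logic; not the real implementation.
def cache_dir_sketch(subdir: str = "") -> Path:
    home = os.getenv("HF_HOME") or os.path.expanduser("~/invokeai/models")
    return Path(home, subdir)

print(cache_dir_sketch("hub"))  # e.g. ~/invokeai/models/hub -- where all HF models now live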
@@ -437,10 +437,10 @@ def main():
     args = _parse_args()
     global_set_root(args.root_dir)

-    cache_dir = str(global_cache_dir("diffusers"))
+    cache_dir = str(global_cache_dir("hub"))
     os.environ[
         "HF_HOME"
-    ] = cache_dir  # because not clear the merge pipeline is honoring cache_dir
+    ] = str(global_cache_dir())  # because not clear the merge pipeline is honoring cache_dir
     args.cache_dir = cache_dir

     try:
@@ -19,7 +19,7 @@ import warnings
 from enum import Enum
 from pathlib import Path
 from shutil import move, rmtree
-from typing import Any, Optional, Union
+from typing import Any, Optional, Union, Callable

 import safetensors
 import safetensors.torch

@@ -47,6 +47,8 @@ class SDLegacyType(Enum):
     V1 = 1
     V1_INPAINT = 2
     V2 = 3
+    V2_e = 4
+    V2_v = 5
     UNKNOWN = 99
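For orientation: `V2_e` marks epsilon-prediction (512-pixel base) SD 2.x checkpoints and `V2_v` marks v-prediction (768-pixel) ones, matching the menu text in `_pick_configuration_file`. Below is a sketch of the type-to-config mapping these values enable, consistent with the branches added in the converter diff above; the helper name is invented.

from enum import Enum
from pathlib import Path
from typing import Optional

class SDLegacyType(Enum):
    V1 = 1
    V1_INPAINT = 2
    V2 = 3
    V2_e = 4   # epsilon-prediction, 512-pixel base models
    V2_v = 5   # v-prediction, 768-pixel models
    UNKNOWN = 99

CONFIG_FOR_TYPE = {
    SDLegacyType.V1: "v1-inference.yaml",
    SDLegacyType.V1_INPAINT: "v1-inpainting-inference.yaml",
    SDLegacyType.V2_e: "v2-inference.yaml",
    SDLegacyType.V2_v: "v2-inference-v.yaml",
}

def config_for(model_type: SDLegacyType, config_dir: Path) -> Optional[Path]:
    name = CONFIG_FOR_TYPE.get(model_type)
    return config_dir / "stable-diffusion" / name if name else None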
@ -280,13 +282,13 @@ class ModelManager(object):
|
|||||||
self.stack.remove(model_name)
|
self.stack.remove(model_name)
|
||||||
if delete_files:
|
if delete_files:
|
||||||
if weights:
|
if weights:
|
||||||
print(f"** deleting file {weights}")
|
print(f"** Deleting file {weights}")
|
||||||
Path(weights).unlink(missing_ok=True)
|
Path(weights).unlink(missing_ok=True)
|
||||||
elif path:
|
elif path:
|
||||||
print(f"** deleting directory {path}")
|
print(f"** Deleting directory {path}")
|
||||||
rmtree(path, ignore_errors=True)
|
rmtree(path, ignore_errors=True)
|
||||||
elif repo_id:
|
elif repo_id:
|
||||||
print(f"** deleting the cached model directory for {repo_id}")
|
print(f"** Deleting the cached model directory for {repo_id}")
|
||||||
self._delete_model_from_cache(repo_id)
|
self._delete_model_from_cache(repo_id)
|
||||||
|
|
||||||
def add_model(
|
def add_model(
|
||||||
@ -418,11 +420,6 @@ class ModelManager(object):
|
|||||||
"NOHASH",
|
"NOHASH",
|
||||||
)
|
)
|
||||||
|
|
||||||
# scan model
|
|
||||||
self.scan_model(model_name, weights)
|
|
||||||
|
|
||||||
print(f">> Loading {model_name} from {weights}")
|
|
||||||
|
|
||||||
# for usage statistics
|
# for usage statistics
|
||||||
if self._has_cuda():
|
if self._has_cuda():
|
||||||
torch.cuda.reset_peak_memory_stats()
|
torch.cuda.reset_peak_memory_stats()
|
||||||
@ -436,10 +433,13 @@ class ModelManager(object):
|
|||||||
weight_bytes = f.read()
|
weight_bytes = f.read()
|
||||||
model_hash = self._cached_sha256(weights, weight_bytes)
|
model_hash = self._cached_sha256(weights, weight_bytes)
|
||||||
sd = None
|
sd = None
|
||||||
if weights.endswith(".safetensors"):
|
|
||||||
sd = safetensors.torch.load(weight_bytes)
|
if weights.endswith(".ckpt"):
|
||||||
else:
|
self.scan_model(model_name, weights)
|
||||||
sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu")
|
sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu")
|
||||||
|
else:
|
||||||
|
sd = safetensors.torch.load(weight_bytes)
|
||||||
|
|
||||||
del weight_bytes
|
del weight_bytes
|
||||||
# merged models from auto11 merge board are flat for some reason
|
# merged models from auto11 merge board are flat for some reason
|
||||||
if "state_dict" in sd:
|
if "state_dict" in sd:
|
||||||
@@ -462,18 +462,12 @@ class ModelManager(object):
                 vae = os.path.normpath(os.path.join(Globals.root, vae))
                 if os.path.exists(vae):
                     print(f" | Loading VAE weights from: {vae}")
-                    vae_ckpt = None
-                    vae_dict = None
-                    if vae.endswith(".safetensors"):
-                        vae_ckpt = safetensors.torch.load_file(vae)
-                        vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"}
-                    else:
+                    if vae.endswith((".ckpt",".pt")):
+                        self.scan_model(vae,vae)
                         vae_ckpt = torch.load(vae, map_location="cpu")
-                        vae_dict = {
-                            k: v
-                            for k, v in vae_ckpt["state_dict"].items()
-                            if k[0:4] != "loss"
-                        }
+                    else:
+                        vae_ckpt = safetensors.torch.load_file(vae)
+                    vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"}
                     model.first_stage_model.load_state_dict(vae_dict, strict=False)
                 else:
                     print(f" | VAE file {vae} not found. Skipping.")
@@ -495,9 +489,9 @@ class ModelManager(object):

         print(f">> Loading diffusers model from {name_or_path}")
         if using_fp16:
             print(" | Using faster float16 precision")
         else:
             print(" | Using more accurate float32 precision")

         # TODO: scan weights maybe?
         pipeline_args: dict[str, Any] = dict(
@@ -507,7 +501,7 @@ class ModelManager(object):
         if vae := self._load_vae(mconfig["vae"]):
             pipeline_args.update(vae=vae)
         if not isinstance(name_or_path, Path):
-            pipeline_args.update(cache_dir=global_cache_dir("diffusers"))
+            pipeline_args.update(cache_dir=global_cache_dir("hub"))
         if using_fp16:
             pipeline_args.update(torch_dtype=torch.float16)
             fp_args_list = [{"revision": "fp16"}, {}]
@@ -549,7 +543,7 @@ class ModelManager(object):
         width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
         height = width

         print(f" | Default image dimensions = {width} x {height}")

         return pipeline, width, height, model_hash

@@ -589,13 +583,14 @@ class ModelManager(object):
         if self._has_cuda():
             torch.cuda.empty_cache()

+    @classmethod
     def scan_model(self, model_name, checkpoint):
         """
         Apply picklescanner to the indicated checkpoint and issue a warning
         and option to exit if an infected file is identified.
         """
         # scan model
-        print(f">> Scanning Model: {model_name}")
+        print(f" | Scanning Model: {model_name}")
         scan_result = scan_file_path(checkpoint)
         if scan_result.infected_files != 0:
             if scan_result.infected_files == 1:
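scan_model() delegates to the picklescan package. A short sketch of the underlying call (the import path follows the picklescan package's documented interface, and the checkpoint filename is only an example):

    from picklescan.scanner import scan_file_path

    result = scan_file_path("models/ldm/stable-diffusion-v1/custom-model.ckpt")
    if result.infected_files != 0:
        # a non-zero count means the pickle contains code that would run on load
        raise RuntimeError("refusing to load a malicious checkpoint")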
@@ -618,13 +613,13 @@ class ModelManager(object):
                 print("### Exiting InvokeAI")
                 sys.exit()
         else:
-            print(">> Model scanned ok")
+            print(" | Model scanned ok")

     def import_diffuser_model(
         self,
         repo_or_path: Union[str, Path],
         model_name: str = None,
-        model_description: str = None,
+        description: str = None,
         vae: dict = None,
         commit_to_conf: Path = None,
     ) -> bool:
@@ -640,7 +635,7 @@ class ModelManager(object):
         models.yaml file.
         """
         model_name = model_name or Path(repo_or_path).stem
-        model_description = model_description or f"Imported diffusers model {model_name}"
+        model_description = description or f"Imported diffusers model {model_name}"
         new_config = dict(
             description=model_description,
             vae=vae,
@@ -724,15 +719,25 @@ class ModelManager(object):
         format. Valid return values include:
         SDLegacyType.V1
         SDLegacyType.V1_INPAINT
-        SDLegacyType.V2
+        SDLegacyType.V2 (V2 prediction type unknown)
+        SDLegacyType.V2_e (V2 using 'epsilon' prediction type)
+        SDLegacyType.V2_v (V2 using 'v_prediction' prediction type)
         SDLegacyType.UNKNOWN
         """
-        key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
-        if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
-            return SDLegacyType.V2
+        global_step = checkpoint.get('global_step')
+        state_dict = checkpoint.get("state_dict") or checkpoint

         try:
             state_dict = checkpoint.get("state_dict") or checkpoint
+            key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
+            if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
+                if global_step == 220000:
+                    return SDLegacyType.V2_e
+                elif global_step == 110000:
+                    return SDLegacyType.V2_v
+                else:
+                    return SDLegacyType.V2
+            # otherwise we assume a V1 file
             in_channels = state_dict[
                 "model.diffusion_model.input_blocks.0.0.weight"
             ].shape[1]
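The probe now distinguishes the two SD-2.x parameterizations by the training-step counter the checkpoints shipped with: per the logic above, a global_step of 220000 marks the 'epsilon' model and 110000 the 'v_prediction' model, with plain V2 returned when the counter is unrecognized. As a self-contained sketch of the same heuristic:

    from enum import Enum

    class SDLegacyType(Enum):
        V1 = 1
        V1_INPAINT = 2
        V2 = 3
        V2_e = 4
        V2_v = 5
        UNKNOWN = 99

    KEY = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"

    def probe(checkpoint: dict) -> SDLegacyType:
        # A 1024-wide cross-attention key marks a V2 checkpoint; the step
        # counter of the two official SD-2.x releases disambiguates the
        # prediction type.
        global_step = checkpoint.get("global_step")
        state_dict = checkpoint.get("state_dict") or checkpoint
        if KEY in state_dict and state_dict[KEY].shape[-1] == 1024:
            if global_step == 220000:
                return SDLegacyType.V2_e
            elif global_step == 110000:
                return SDLegacyType.V2_v
            return SDLegacyType.V2
        return SDLegacyType.UNKNOWN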
@@ -746,12 +751,14 @@ class ModelManager(object):
         return SDLegacyType.UNKNOWN

     def heuristic_import(
         self,
         path_url_or_repo: str,
         convert: bool = False,
         model_name: str = None,
         description: str = None,
-        commit_to_conf: Path = None,
+        model_config_file: Path = None,
+        commit_to_conf: Path = None,
+        config_file_callback: Callable[[Path],Path] = None,
     ) -> str:
         """
         Accept a string which could be:
@@ -781,23 +788,25 @@ class ModelManager(object):
         """
         model_path: Path = None
         thing = path_url_or_repo  # to save typing
+        is_temporary = False

         print(f">> Probing {thing} for import")

         if thing.startswith(("http:", "https:", "ftp:")):
             print(f" | {thing} appears to be a URL")
             model_path = self._resolve_path(
                 thing, "models/ldm/stable-diffusion-v1"
             )  # _resolve_path does a download if needed
+            is_temporary = True

         elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
             if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
                 print(
                     f" | {Path(thing).name} appears to be part of a diffusers model. Skipping import"
                 )
                 return
             else:
                 print(f" | {thing} appears to be a checkpoint file on disk")
                 model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")

         elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
@@ -824,7 +833,10 @@ class ModelManager(object):
                 Path(thing).rglob("*.safetensors")
             ):
                 if model_name := self.heuristic_import(
-                    str(m), convert, commit_to_conf=commit_to_conf
+                    str(m),
+                    convert,
+                    commit_to_conf=commit_to_conf,
+                    config_file_callback=config_file_callback,
                 ):
                     print(f" >> {model_name} successfully imported")
                     return model_name
@@ -848,40 +860,61 @@ class ModelManager(object):

         if model_path.stem in self.config:  # already imported
             print(" | Already imported. Skipping")
-            return
+            return model_path.stem

         # another round of heuristics to guess the correct config file.
-        checkpoint = (
-            safetensors.torch.load_file(model_path)
-            if model_path.suffix == ".safetensors"
-            else torch.load(model_path)
-        )
-        model_type = self.probe_model_type(checkpoint)
-
-        model_config_file = None
-        if model_type == SDLegacyType.V1:
-            print(" | SD-v1 model detected")
-            model_config_file = Path(
-                Globals.root, "configs/stable-diffusion/v1-inference.yaml"
-            )
-        elif model_type == SDLegacyType.V1_INPAINT:
-            print(" | SD-v1 inpainting model detected")
-            model_config_file = Path(
-                Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
-            )
-        elif model_type == SDLegacyType.V2:
-            print(
-                " | SD-v2 model detected; model will be converted to diffusers format"
-            )
-            model_config_file = Path(
-                Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
-            )
-            convert = True
-        else:
-            print(
-                f"** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. Skipping import"
-            )
-            return
+        checkpoint = None
+        if model_path.suffix.endswith((".ckpt",".pt")):
+            self.scan_model(model_path,model_path)
+            checkpoint = torch.load(model_path)
+        else:
+            checkpoint = safetensors.torch.load_file(model_path)
+
+        # additional probing needed if no config file provided
+        if model_config_file is None:
+            model_type = self.probe_model_type(checkpoint)
+            if model_type == SDLegacyType.V1:
+                print(" | SD-v1 model detected")
+                model_config_file = Path(
+                    Globals.root, "configs/stable-diffusion/v1-inference.yaml"
+                )
+            elif model_type == SDLegacyType.V1_INPAINT:
+                print(" | SD-v1 inpainting model detected")
+                model_config_file = Path(
+                    Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
+                )
+            elif model_type == SDLegacyType.V2_v:
+                print(
+                    " | SD-v2-v model detected"
+                )
+                model_config_file = Path(
+                    Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
+                )
+            elif model_type == SDLegacyType.V2_e:
+                print(
+                    " | SD-v2-e model detected"
+                )
+                model_config_file = Path(
+                    Globals.root, "configs/stable-diffusion/v2-inference.yaml"
+                )
+            elif model_type == SDLegacyType.V2:
+                print(
+                    f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
+                )
+            else:
+                print(
+                    f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
+                )
+
+        if not model_config_file and config_file_callback:
+            model_config_file = config_file_callback(model_path)
+        if not model_config_file:
+            return
+
+        if model_config_file.name.startswith('v2'):
+            convert = True
+            print(
+                " | This SD-v2 model will be converted to diffusers format for use"
+            )

         if convert:
             diffuser_path = Path(
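config_file_callback gives the caller a last chance to supply a legacy .yaml config when probing fails; an interactive frontend can use it to prompt the user. A hypothetical callback, sketched (the function name and hard-coded choice are illustrative only):

    from pathlib import Path

    def choose_config(model_path: Path) -> Path:
        # Called with the checkpoint path when no config could be inferred;
        # returning None aborts the import.
        print(f"no config could be inferred for {model_path.name}")
        return Path("configs/stable-diffusion/v2-inference-v.yaml")

    # manager.heuristic_import(
    #     "path/to/mystery-model.safetensors",
    #     config_file_callback=choose_config,
    # )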
@@ -895,7 +928,12 @@ class ModelManager(object):
                 model_description=description,
                 original_config_file=model_config_file,
                 commit_to_conf=commit_to_conf,
+                scan_needed=False,
             )
+            # in the event that this file was downloaded automatically prior to conversion
+            # we do not keep the original .ckpt/.safetensors around
+            if is_temporary:
+                model_path.unlink(missing_ok=True)
         else:
             model_name = self.import_ckpt_model(
                 model_path,
@@ -915,14 +953,15 @@ class ModelManager(object):
         return model_name

     def convert_and_import(
         self,
         ckpt_path: Path,
         diffusers_path: Path,
         model_name=None,
         model_description=None,
         vae=None,
         original_config_file: Path = None,
         commit_to_conf: Path = None,
+        scan_needed: bool=True,
     ) -> str:
         """
         Convert a legacy ckpt weights file to diffuser model and import
@@ -957,11 +996,12 @@ class ModelManager(object):
             extract_ema=True,
             original_config_file=original_config_file,
             vae=vae_model,
+            scan_needed=scan_needed,
         )
         print(
             f" | Success. Optimized model is now located at {str(diffusers_path)}"
         )
         print(f" | Writing new config file entry for {model_name}")
         new_config = dict(
             path=str(diffusers_path),
             description=model_description,
@@ -1088,9 +1128,12 @@ class ModelManager(object):
         to the 2.3.0 "diffusers" version. This should be a one-time operation, called at
         script startup time.
         """
-        # Three transformer models to check: bert, clip and safety checker
+        # Three transformer models to check: bert, clip and safety checker, and
+        # the diffusers as well
+        models_dir = Path(Globals.root, "models")
         legacy_locations = [
             Path(
+                models_dir,
                 "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker"
             ),
             Path("bert-base-uncased/models--bert-base-uncased"),
@@ -1098,17 +1141,26 @@ class ModelManager(object):
                 "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14"
             ),
         ]
-        models_dir = Path(Globals.root, "models")
+        legacy_locations.extend(list(global_cache_dir("diffusers").glob('*')))
         legacy_layout = False
         for model in legacy_locations:
-            legacy_layout = legacy_layout or Path(models_dir, model).exists()
+            legacy_layout = legacy_layout or model.exists()
         if not legacy_layout:
             return

         print(
-            "** Legacy version <= 2.2.5 model directory layout detected. Reorganizing."
+            """
+>> ALERT:
+>> The location of your previously-installed diffusers models needs to move from
+>> invokeai/models/diffusers to invokeai/models/hub due to a change introduced by
+>> diffusers version 0.14. InvokeAI will now move all models from the "diffusers" directory
+>> into "hub" and then remove the diffusers directory. This is a quick, safe, one-time
+>> operation. However if you have customized either of these directories and need to
+>> make adjustments, please press ctrl-C now to abort and relaunch InvokeAI when you are ready.
+>> Otherwise press <enter> to continue."""
         )
         print("** This is a quick one-time operation.")
+        input("continue> ")

         # transformer files get moved into the hub directory
         if cls._is_huggingface_hub_directory_present():
@@ -1120,33 +1172,20 @@ class ModelManager(object):
         for model in legacy_locations:
             source = models_dir / model
             dest = hub / model.stem
+            if dest.exists() and not source.exists():
+                continue
             print(f"** {source} => {dest}")
             if source.exists():
-                if dest.exists():
-                    rmtree(source)
+                if dest.is_symlink():
+                    print(f"** Found symlink at {dest.name}. Not migrating.")
+                elif dest.exists():
+                    if source.is_dir():
+                        rmtree(source)
+                    else:
+                        source.unlink()
                 else:
                     move(source, dest)

-        # anything else gets moved into the diffusers directory
-        if cls._is_huggingface_hub_directory_present():
-            diffusers = global_cache_dir("diffusers")
-        else:
-            diffusers = models_dir / "diffusers"
-
-        os.makedirs(diffusers, exist_ok=True)
-        for root, dirs, _ in os.walk(models_dir, topdown=False):
-            for dir in dirs:
-                full_path = Path(root, dir)
-                if full_path.is_relative_to(hub) or full_path.is_relative_to(diffusers):
-                    continue
-                if Path(dir).match("models--*--*"):
-                    dest = diffusers / dir
-                    print(f"** {full_path} => {dest}")
-                    if dest.exists():
-                        rmtree(full_path)
-                    else:
-                        move(full_path, dest)
-
         # now clean up by removing any empty directories
         empty = [
             root
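Distilled into one function, the per-model migration rule above is roughly the following (a sketch, not the shipped code):

    from pathlib import Path
    from shutil import move, rmtree

    def migrate_one(source: Path, dest: Path) -> None:
        # Already migrated and source gone: nothing to do.
        if dest.exists() and not source.exists():
            return
        if not source.exists():
            return
        if dest.is_symlink():
            # the user has linked this location elsewhere; leave it alone
            print(f"** Found symlink at {dest.name}. Not migrating.")
        elif dest.exists():
            # destination already populated: drop the stale source copy
            if source.is_dir():
                rmtree(source)
            else:
                source.unlink()
        else:
            move(str(source), str(dest))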
@@ -1244,7 +1283,7 @@ class ModelManager(object):
             path = name_or_path
         else:
             owner, repo = name_or_path.split("/")
-            path = Path(global_cache_dir("diffusers") / f"models--{owner}--{repo}")
+            path = Path(global_cache_dir("hub") / f"models--{owner}--{repo}")
         if not path.exists():
             return None
         hashpath = path / "checksum.sha256"
@@ -1252,7 +1291,7 @@ class ModelManager(object):
             with open(hashpath) as f:
                 hash = f.read()
             return hash
         print(" | Calculating sha256 hash of model files")
         tic = time.time()
         sha = hashlib.sha256()
         count = 0
@@ -1264,7 +1303,7 @@ class ModelManager(object):
                     sha.update(chunk)
         hash = sha.hexdigest()
         toc = time.time()
         print(f" | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
         with open(hashpath, "w") as f:
             f.write(hash)
         return hash
@@ -1305,16 +1344,16 @@ class ModelManager(object):
         using_fp16 = self.precision == "float16"

         vae_args.update(
-            cache_dir=global_cache_dir("diffusers"),
+            cache_dir=global_cache_dir("hug"),
             local_files_only=not Globals.internet_available,
         )

         print(f" | Loading diffusers VAE from {name_or_path}")
         if using_fp16:
             vae_args.update(torch_dtype=torch.float16)
             fp_args_list = [{"revision": "fp16"}, {}]
         else:
             print(" | Using more accurate float32 precision")
             fp_args_list = [{}]

         vae = None
@@ -1355,7 +1394,7 @@ class ModelManager(object):
             hashes_to_delete.add(revision.commit_hash)
         strategy = cache_info.delete_revisions(*hashes_to_delete)
         print(
-            f"** deletion of this model is expected to free {strategy.expected_freed_size_str}"
+            f"** Deletion of this model is expected to free {strategy.expected_freed_size_str}"
         )
         strategy.execute()

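The cache deletion above relies on huggingface_hub's scan/delete API: deletion is staged as a "strategy" object that reports how much space it will free before anything is removed. A minimal sketch of the same flow (the repo id is only an example):

    from huggingface_hub import scan_cache_dir

    cache_info = scan_cache_dir()
    hashes = {
        revision.commit_hash
        for repo in cache_info.repos
        if repo.repo_id == "stabilityai/stable-diffusion-2-1"
        for revision in repo.revisions
    }
    strategy = cache_info.delete_revisions(*hashes)
    # dry-run report, then the actual removal
    print(f"Deletion is expected to free {strategy.expected_freed_size_str}")
    strategy.execute()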
@@ -9,6 +9,8 @@ Exports function retrieve_metadata(path)
 import os
 import re
 import json
+from pathlib import Path
+from filelock import FileLock
 from PIL import PngImagePlugin, Image

 # -------------------image generation utils-----
@@ -19,8 +21,26 @@ class PngWriter:
         self.outdir = outdir
         os.makedirs(outdir, exist_ok=True)

+    def unique_prefix(self)->str:
+        next_prefix_file = Path(self.outdir,'.next_prefix')
+        next_prefix_lock = Path(self.outdir,'.next_prefix.lock')
+        prefix = 0
+        with FileLock(next_prefix_lock):
+            if not next_prefix_file.exists():
+                prefix = self._unused_prefix()
+            else:
+                with open(next_prefix_file,'r') as file:
+                    prefix = 0
+                    try:
+                        prefix=int(file.readline())
+                    except (TypeError, ValueError):
+                        prefix=self._unused_prefix()
+            with open(next_prefix_file,'w') as file:
+                file.write(str(prefix+1))
+        return f'{prefix:06}'
+
     # gives the next unique prefix in outdir
-    def unique_prefix(self):
+    def _unused_prefix(self)->int:
         # sort reverse alphabetically until we find max+1
         dirlist = sorted(os.listdir(self.outdir), reverse=True)
         # find the first filename that matches our pattern or return 000000.0.png
@@ -28,8 +48,7 @@ class PngWriter:
             (f for f in dirlist if re.match('^(\d+)\..*\.png', f)),
             '0000000.0.png',
         )
-        basecount = int(existing_name.split('.', 1)[0]) + 1
-        return f'{basecount:06}'
+        return int(existing_name.split('.', 1)[0]) + 1

     # saves image named _image_ to outdir/name, writing metadata from prompt
     # returns full path of output
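The new unique_prefix() persists a counter in .next_prefix and serializes access through a lock file, so multiple InvokeAI processes writing into the same output directory can never hand out the same image prefix twice; the directory scan in _unused_prefix() is now only the fallback that seeds the counter. The core pattern, as a standalone sketch:

    from pathlib import Path
    from filelock import FileLock

    def next_counter(outdir: str) -> int:
        # A small on-disk counter guarded by a lock file: each caller gets a
        # distinct value even when several processes race for it.
        counter_file = Path(outdir, '.next_prefix')
        with FileLock(str(counter_file) + '.lock'):
            try:
                value = int(counter_file.read_text())
            except (FileNotFoundError, ValueError):
                value = 0
            counter_file.write_text(str(value + 1))
        return value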
@@ -91,14 +110,12 @@ class PromptFormatter:
         switches.append(f'-H{opt.height or t2i.height}')
         switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}')
         switches.append(f'-A{opt.sampler_name or t2i.sampler_name}')
-        # to do: put model name into the t2i object
-        # switches.append(f'--model{t2i.model_name}')
         if opt.seamless or t2i.seamless:
-            switches.append(f'--seamless')
+            switches.append('--seamless')
         if opt.init_img:
             switches.append(f'-I{opt.init_img}')
         if opt.fit:
-            switches.append(f'--fit')
+            switches.append('--fit')
         if opt.strength and opt.init_img is not None:
             switches.append(f'-f{opt.strength or t2i.strength}')
         if opt.gfpgan_strength:
@@ -17,6 +17,7 @@ from pathlib import Path
 from typing import List, Tuple

 import npyscreen
+from diffusers.utils.import_utils import is_xformers_available
 from npyscreen import widget
 from omegaconf import OmegaConf

@@ -29,7 +30,7 @@ from ldm.invoke.training.textual_inversion_training import (
 TRAINING_DATA = "text-inversion-training-data"
 TRAINING_DIR = "text-inversion-output"
 CONF_FILE = "preferences.conf"
+XFORMERS_AVAILABLE = is_xformers_available()

 class textualInversionForm(npyscreen.FormMultiPageAction):
     resolutions = [512, 768, 1024]
@@ -178,7 +179,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=10000,
             step=500,
             lowest=1,
-            value=saved_args.get("max_train_steps", 3000),
+            value=saved_args.get("max_train_steps", 2500),
             scroll_exit=True,
         )
         self.train_batch_size = self.add_widget_intelligent(
@@ -187,7 +188,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             out_of=50,
             step=1,
             lowest=1,
-            value=saved_args.get("train_batch_size", 8),
+            value=saved_args.get("train_batch_size", 8 if XFORMERS_AVAILABLE else 3),
             scroll_exit=True,
         )
         self.gradient_accumulation_steps = self.add_widget_intelligent(
@@ -225,7 +226,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
         self.enable_xformers_memory_efficient_attention = self.add_widget_intelligent(
             npyscreen.Checkbox,
             name="Use xformers acceleration",
-            value=saved_args.get("enable_xformers_memory_efficient_attention", False),
+            value=saved_args.get("enable_xformers_memory_efficient_attention", XFORMERS_AVAILABLE),
             scroll_exit=True,
         )
         self.lr_scheduler = self.add_widget_intelligent(
@@ -428,8 +429,7 @@ def do_front_end(args: Namespace):
         print(str(e))
         print("** DETAILS:")
         print(traceback.format_exc())
-

 def main():
     args = parse_args()
     global_set_root(args.root_dir or Globals.root)
@@ -67,7 +67,7 @@ else:
     "nearest": PIL.Image.NEAREST,
 }
 # ------------------------------------------------------------------------------
+XFORMERS_AVAILABLE = is_xformers_available

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
 check_min_version("0.10.0.dev0")
@@ -227,7 +227,7 @@ def parse_args():
     training_group.add_argument(
         "--train_batch_size",
         type=int,
-        default=16,
+        default=8 if XFORMERS_AVAILABLE else 3,
         help="Batch size (per device) for the training dataloader.",
     )
     training_group.add_argument("--num_train_epochs", type=int, default=100)
@@ -324,6 +324,7 @@ def parse_args():
     parser.add_argument(
         "--enable_xformers_memory_efficient_attention",
         action="store_true",
+        default=XFORMERS_AVAILABLE,
         help="Whether or not to use xformers.",
     )

@@ -536,7 +537,7 @@ def do_textual_inversion_training(
     seed: int = None,
     resolution: int = 512,
     center_crop: bool = False,
-    train_batch_size: int = 16,
+    train_batch_size: int = 4,
     num_train_epochs: int = 100,
     max_train_steps: int = 5000,
     gradient_accumulation_steps: int = 1,
@@ -634,7 +635,7 @@ def do_textual_inversion_training(
     assert (
         pretrained_model_name_or_path
     ), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}"
-    pipeline_args = dict(cache_dir=global_cache_dir("diffusers"))
+    pipeline_args = dict(cache_dir=global_cache_dir("hub"))

     # Load tokenizer
     if tokenizer_name:
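Both files key their training defaults off whether xformers can be imported. A small sketch of the probe (note the call parentheses: the hunk at @@ -67,7 above assigns the bare function object, which is always truthy in a conditional, so the sketch calls it, which appears to be the intent):

    from diffusers.utils.import_utils import is_xformers_available

    XFORMERS_AVAILABLE = is_xformers_available()
    # memory-efficient attention permits a larger per-device batch
    default_batch_size = 8 if XFORMERS_AVAILABLE else 3
    print(f"xformers available: {XFORMERS_AVAILABLE}; batch size {default_batch_size}")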
mkdocs.yml (28 changed lines)
@@ -2,14 +2,14 @@

 # General
 site_name: InvokeAI Stable Diffusion Toolkit Docs
-site_url: https://invoke-ai.github.io/InvokeAI
+site_url: !ENV [SITE_URL, 'https://invoke-ai.github.io/InvokeAI']
 site_author: mauwii
 dev_addr: '127.0.0.1:8080'

 # Repository
-repo_name: 'invoke-ai/InvokeAI'
-repo_url: 'https://github.com/invoke-ai/InvokeAI'
-edit_uri: edit/main/docs/
+repo_name: !ENV [REPO_NAME, 'invoke-ai/InvokeAI']
+repo_url: !ENV [REPO_URL, 'https://github.com/invoke-ai/InvokeAI']
+edit_uri: blob/main/docs/

 # Copyright
 copyright: Copyright © 2022 InvokeAI Team
@@ -19,7 +19,8 @@ theme:
   name: material
   icon:
     repo: fontawesome/brands/github
-    edit: material/file-document-edit-outline
+    edit: material/pencil
+    view: material/eye
   palette:
     - media: '(prefers-color-scheme: light)'
       scheme: default
@@ -33,6 +34,11 @@ theme:
         icon: material/lightbulb-outline
         name: Switch to light mode
   features:
+    - content.action.edit
+    - content.action.view
+    - content.code.copy
+    - content.tabs.link
+    - navigation.indexes
     - navigation.instant
     - navigation.tabs
     - navigation.top
@@ -89,9 +95,9 @@ plugins:
       enable_creation_date: true
   - redirects:
       redirect_maps:
         'installation/INSTALL_AUTOMATED.md': 'installation/010_INSTALL_AUTOMATED.md'
         'installation/INSTALL_MANUAL.md': 'installation/020_INSTALL_MANUAL.md'
         'installation/INSTALL_SOURCE.md': 'installation/020_INSTALL_MANUAL.md'
         'installation/INSTALL_DOCKER.md': 'installation/040_INSTALL_DOCKER.md'
         'installation/INSTALLING_MODELS.md': 'installation/050_INSTALLING_MODELS.md'
         'installation/INSTALL_PATCHMATCH.md': 'installation/060_INSTALL_PATCHMATCH.md'
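The !ENV tags let a fork build the docs against its own URLs by setting SITE_URL, REPO_NAME, and REPO_URL at build time instead of editing the file. In rough Python terms the tag behaves like the following sketch:

    import os

    def env_or(name: str, default: str) -> str:
        # approximate semantics of mkdocs' "!ENV [NAME, default]" YAML tag:
        # use the environment value when set, else fall back to the default
        return os.environ.get(name, default)

    site_url = env_or("SITE_URL", "https://invoke-ai.github.io/InvokeAI")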
pyproject.toml (159 changed lines)
@@ -1,46 +1,40 @@
 [build-system]
-requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
 build-backend = "setuptools.build_meta"
+requires = ["setuptools ~= 67.1", "wheel"]

 [project]
-name = "InvokeAI"
-description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
-requires-python = ">=3.9, <3.11"
-readme = { content-type = "text/markdown", file = "README.md" }
-keywords = ["stable-diffusion", "AI"]
-dynamic = ["version"]
-license = { file = "LICENSE" }
-authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
+authors = [{name = "The InvokeAI Project", email = "lincoln.stein@gmail.com"}]
 classifiers = [
-  'Development Status :: 4 - Beta',
-  'Environment :: GPU',
-  'Environment :: GPU :: NVIDIA CUDA',
-  'Environment :: MacOS X',
-  'Intended Audience :: End Users/Desktop',
-  'Intended Audience :: Developers',
-  'License :: OSI Approved :: MIT License',
-  'Operating System :: POSIX :: Linux',
-  'Operating System :: MacOS',
-  'Operating System :: Microsoft :: Windows',
-  'Programming Language :: Python :: 3 :: Only',
-  'Programming Language :: Python :: 3.8',
-  'Programming Language :: Python :: 3.9',
-  'Programming Language :: Python :: 3.10',
-  'Topic :: Artistic Software',
-  'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
-  'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
-  'Topic :: Multimedia :: Graphics',
-  'Topic :: Scientific/Engineering :: Artificial Intelligence',
-  'Topic :: Scientific/Engineering :: Image Processing',
+  "Development Status :: 4 - Beta",
+  "Environment :: GPU :: NVIDIA CUDA",
+  "Environment :: GPU",
+  "Environment :: MacOS X",
+  "Intended Audience :: Developers",
+  "Intended Audience :: End Users/Desktop",
+  "License :: OSI Approved :: MIT License",
+  "Operating System :: MacOS",
+  "Operating System :: Microsoft :: Windows",
+  "Operating System :: POSIX :: Linux",
+  "Programming Language :: Python :: 3 :: Only",
+  "Programming Language :: Python :: 3",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python",
+  "Topic :: Artistic Software",
+  "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
+  "Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
+  "Topic :: Multimedia :: Graphics",
+  "Topic :: Scientific/Engineering :: Artificial Intelligence",
+  "Topic :: Scientific/Engineering :: Image Processing",
 ]
 dependencies = [
-  "accelerate",
+  "accelerate~=0.16",
   "albumentations",
   "click",
-  "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
+  "clip_anytorch",
   "compel==0.1.7",
   "datasets",
-  "diffusers[torch]~=0.13",
+  "diffusers[torch]~=0.14",
   "dnspython==2.2.1",
   "einops",
   "eventlet",
@@ -54,7 +48,7 @@ dependencies = [
   "huggingface-hub>=0.11.1",
   "imageio",
   "imageio-ffmpeg",
-  "k-diffusion",  # replacing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip",
+  "k-diffusion",
   "kornia",
   "npyscreen",
   "numpy<1.24",
@@ -62,75 +56,132 @@ dependencies = [
   "opencv-python",
   "picklescan",
   "pillow",
-  "pudb",
   "prompt-toolkit",
+  "pudb",
   "pypatchmatch",
   "pyreadline3",
   "pytorch-lightning==1.7.7",
   "realesrgan",
   "requests==2.28.2",
-  "safetensors",
+  "safetensors~=0.3.0",
   "scikit-image>=0.19",
   "send2trash",
   "streamlit",
   "taming-transformers-rom1504",
   "test-tube>=0.7.5",
-  "torch>=1.13.1",
   "torch-fidelity",
-  "torchvision>=0.14.1",
+  "torch~=1.13.1",
   "torchmetrics",
-  "transformers~=4.25",
+  "torchvision>=0.14.1",
+  "transformers~=4.26",
   "windows-curses; sys_platform=='win32'",
 ]
+description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
+dynamic = ["version"]
+keywords = ["AI", "stable-diffusion"]
+license = {text = "MIT"}
+name = "InvokeAI"
+readme = {content-type = "text/markdown", file = "README.md"}
+requires-python = ">=3.9, <3.11"

 [project.optional-dependencies]
+"dev" = [
+  "black[jupyter]",
+  "flake8",
+  "flake8-black",
+  "flake8-bugbear",
+  "isort",
+  "pre-commit",
+]
 "dist" = ["pip-tools", "pipdeptree", "twine"]
 "docs" = [
-  "mkdocs-material<9.0",
   "mkdocs-git-revision-date-localized-plugin",
+  "mkdocs-material==9.*",
   "mkdocs-redirects==1.2.0",
 ]
-"test" = ["pytest>6.0.0", "pytest-cov"]
+"test" = ["pytest-cov", "pytest>6.0.0"]
 "xformers" = [
-  "xformers~=0.0.16; sys_platform!='darwin'",
   "triton; sys_platform=='linux'",
+  "xformers~=0.0.16; sys_platform!='darwin'",
 ]

 [project.scripts]

 # legacy entrypoints; provided for backwards compatibility
-"invoke.py" = "ldm.invoke.CLI:main"
 "configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
-"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
+"invoke.py" = "ldm.invoke.CLI:main"
 "merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
+"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"

 # modern entrypoints
 "invokeai" = "ldm.invoke.CLI:main"
 "invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
-"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
-"invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
 "invokeai-model-install" = "ldm.invoke.config.model_install:main"
+"invokeai-merge" = "ldm.invoke.merge_diffusers:main"
+"invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
 "invokeai-update" = "ldm.invoke.config.invokeai_update:main"
+"invokeai-batch" = "ldm.invoke.dynamic_prompts:main"

 [project.urls]
-"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
-"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
-"Source" = "https://github.com/invoke-ai/InvokeAI/"
 "Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
 "Discord" = "https://discord.gg/ZmtBAhwWhy"
+"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
+"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
+"Source" = "https://github.com/invoke-ai/InvokeAI/"

+[tool.setuptools]
+license-files = ["LICENSE"]

 [tool.setuptools.dynamic]
-version = { attr = "ldm.invoke.__version__" }
+version = {attr = "ldm.invoke.__version__"}

 [tool.setuptools.packages.find]
+"include" = [
+  "invokeai.assets.web",
+  "invokeai.backend*",
+  "invokeai.configs*",
+  "invokeai.frontend.dist*",
+  "ldm*",
+]
 "where" = ["."]
-"include" = ["invokeai.assets.web*", "invokeai.backend*", "invokeai.frontend.dist*", "invokeai.configs*", "ldm*"]

 [tool.setuptools.package-data]
 "invokeai.assets.web" = ["**.png"]
-"invokeai.backend" = ["**.png"]
-"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
+"invokeai.configs" = ["**.example", "**.txt", "**.yaml", "**/*.yaml"]
 "invokeai.frontend.dist" = ["**"]

+[tool.black]
+extend-exclude = '''
+/(
+  # skip legacy scripts
+  | scripts/orig_scripts
+)/
+'''
+line-length = 88
+target-version = ['py39']

+[tool.isort]
+atomic = true
+extend_skip_glob = ["scripts/orig_scripts/*"]
+filter_files = true
+line_length = 120
+profile = "black"
+py_version = 39
+remove_redundant_aliases = true
+skip_gitignore = true
+src_paths = ["installer", "invokeai", "ldm", "tests"]
+virtual_env = ".venv"

+[tool.coverage.run]
+branch = true
+parallel = true

+[tool.coverage.report]
+skip_covered = true
+skip_empty = true

+[tool.coverage.paths]
+source = ["invokeai/backend", "ldm/invoke"]

 [tool.pytest.ini_options]
-addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch"
+addopts = ["--cov=invokeai/backend", "--cov=ldm/invoke"]
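The reorganized metadata also introduces a "dev" extra that bundles the lint/format toolchain, so a contributor setup can be installed with, for example, `pip install -e ".[dev,test]"` (adding the "xformers" extra on supported platforms as needed).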
scripts/dynamic_prompts.py (new executable file, 9 lines)
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+"""
+Simple script to generate a file of InvokeAI prompts and settings
+that scan across steps and other parameters.
+"""
+
+import ldm.invoke.dynamic_prompts
+ldm.invoke.dynamic_prompts.main()
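This thin wrapper simply forwards to ldm.invoke.dynamic_prompts.main(), the same function exposed above as the new "invokeai-batch" console entry point, so the batch-prompt feature is reachable both as a legacy script and as an installed command.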