Mirror of https://github.com/invoke-ai/InvokeAI
Synced 2024-08-30 20:32:17 +00:00
Compare commits
538 commits: 2.2.5-rc9 ... development
971f5c5ab1
22133392b2
5e81f51f6a
9fae65ed69
2443e5dc01
a9aa4e45aa
9b6b27a156
b68074bb8f
1f8e56672c
f8708f5dbe
103efea641
b60edab0fa
6bc11bfd3f
5897e511f1
f43b767b87
61cc41aa3f
40c3ab0181
8999a5564b
8423be539b
6cc56043e2
62cda009dd
45e51bac9a
a514f9b236
90b21db86c
bc44ab786c
281a2e3ecb
84cd96decf
a3121b8137
3f6d0fb7da
08ef4d62e9
81cb7fd1b7
7c658c6d76
495104e941
1e1f871ee1
30c5a0b067
9f02595ef2
b101334b4e
0608d259dd
12eff0dd42
939164eaa7
f2d2a49977
37535f5897
e5646dee27
d5011efaa1
74487a95a9
a50f4da9d1
4ae1df5b5e
7f3ba16cd2
7a0438586c
9adaf8f8ad
a341297b0c
9e0504abe5
3131edb255
b0697bc4ff
1e9121c8d6
916e795c26
3aebe754fa
3f0cfaac4a
a3a0a87f55
f5e8ffe7b4
404d81f6fd
c7864f8a6d
9568ac66e0
d4280bbaaa
46a5fd67ed
b93336dbf9
9fe9301762
7f1b95fbda
52c79fa097
62ac725ba9
db188cd3c3
e67ef4aec2
473869b8ed
c8c1b3e217
fcd3ef1f98
a7f11a8c09
318426b67a
6f3e99efc3
7515bcfe78
8d0ef022eb
9f1c1cf2e6
d44112c209
b31f90c0bd
344cdf0ade
500bde5b0e
df03927ec6
419f670f86
30dc9220c1
941d427302
876ae7f70f
a86049f822
ec3d25d778
69a4a6fec5
7b76b79887
3ea732365c
dc5d696ed2
0060551490
2fcc7d9b36
78217f5ef9
c6112e3295
8b08af714d
723dcf4236
ddfd82559f
8488575e5c
7e4e51b224
f3b7316683
25b19b9ab8
9a6a970771
93de78b6e8
00da042dab
6445e802f6
7caf20aad3
11969c2e2e
e821b97cfc
ef1dbdb33d
0cdb7bb0cd
306ed44e19
b0810e1ed7
089c85a017
a1d80fd106
d9c7a28c90
c787a3a801
1f772e4bdc
cb7458db77
ef482b4d3e
3e22160462
6a3d725dbb
8a16c8a196
90eaac5134
896c2532c7
f68702520b
286e46aaa3
088fd97418
e1e978b423
d27d92325d
80f6f9a931
7dff8ccd31
3f6b275bec
5ed6a31b97
b72b61b790
b81231823e
c7c6940e1a
6c33d1356d
f08c78a043
b6dd5b664c
76e7e82f5e
d4376ed240
9d34213b4c
b908f2b4bc
9418324030
0f6856b719
83d8e69219
7f999e9dfc
0c3ae232af
9950790f4c
b50a1eb63f
d55b1e169c
1071a12777
d987d0a336
50a67a7172
a3308c853d
cde395e02f
e7f670a5b6
917c576ddb
dfc0c587b1
548bcaceb2
5fd43fca13
37a356d377
cccbfb12aa
d018b2d7a7
e358adecdd
cdc5f66592
b8cebf29f2
68aebad7ad
ae4a44de3e
2ab868314f
bc46c46835
d82a21cfb2
87439feeb2
e5951ad098
4f51680307
d0ceabd372
2bda3d6d2f
a96af7a15d
93192b90f4
024acf42af
04cb2d39cb
c69573e65d
84f702b6d0
bb70c32ad5
425a1713ab
70e67c45dd
07ca0876ec
aa96a457b6
e28599cadb
ae6dd219d9
19322fc1ec
635e7da05d
c0005eb063
74485411a8
ed70fc683c
425d3bc95d
3994c28b77
0100a63b59
432dc704a6
1d540219fa
827f516baf
48ad0c289c
223e0529ba
98e3bbb3bd
e3efcc620c
15dd1339d2
caf8f0ae35
cfb87bc116
c0ad1b3469
4382cd0b91
b049bbc64e
34395ff490
1bc1085542
d7884432c9
82f6402d04
0e7b735611
5304ef504c
17b295871f
70dcfa1684
5d484273ed
179656d541
73099af6ec
c223d93b4d
4e34194479
00e2674076
0a2e67df1a
7831468304
88d02585e7
f82e82f1bb
317762861f
3f1360368d
d5467e7db5
9284983429
bb79c78fe8
e3735ebb45
eb17dfdeaa
1114ac97e2
c7ef41af54
7075a17091
7f0fb47cf3
775f032c56
5410d42da0
e21e901fa2
00385240e7
0a96d2a888
016551e036
b8bb46042c
b44e9c7752
8ed10c732b
82a53782d0
6adebf065f
83f369053f
77d3839860
c02a0da837
4f4c6bbe33
72ea5453ce
458081f9c9
d1fbe81a60
6c7191712f
248068fe5d
9b281856ee
fdf41cc739
d079445943
a997ab2cf6
e98068a546
b945ae4e01
b23c471cf0
964e584bd3
461358bdde
2433cc344a
bd2eea1c70
16df759499
5a1a36ec29
c76badfb08
71c4f401b0
c59b9897d9
4cf1c856ed
a78a1020be
90cb7a6442
8f5cded86e
02d02a86b1
ba9c695463
8202f34f38
40a7f47d22
37bcf9cc47
0340d9ad53
0d35a67e9c
1260e28d94
229f782e3b
c15b839dd4
a095214e52
8e81425e89
c5cbe8f87d
e0581a2c37
32f538bf3a
3c5a14a814
0661256b61
602e35db65
bc7ece771d
38bdb440d0
ce8c2bea2f
3ac0f11e97
98fe49cb55
2b7e3abe57
150c4a5d2d
0381a853b5
c79ec204ec
8d3b1582a5
5fd7d71a7a
1f0220697b
18ae3949ef
aa95510444
f33df25830
3a5a8ceba5
a1e5f17d1e
303431be89
8e9f80cc97
3ad598761c
b4eaf8b751
fa608efa11
e9d319bfde
561721aef7
891c0f21d5
8973ce7d47
51c283ba56
7d262fc158
fdb16000ab
f62cc7db9d
9fa3e28dd4
9200b26f21
d998b2f806
ac8a7ff70b
2d6e0baa87
c212b74990
0352979a8b
70bd61d616
f2a6985c78
fe5a581313
2ec9792f50
a4204abfce
274b276133
7707bc7818
4c035ad4ae
e9090bca8f
398a9bc0c6
38b9658c15
f04d1bab21
c23efb8e2b
5604d3c447
206101f59d
23348dcd3f
9bf6013fdd
1d11e06e6f
47e6f94111
810fad9e06
853c6af623
1b6bbfb4db
67e25624b9
9c218788e2
bb084a844b
0a88243911
8a0a90d0f3
9141132a5c
78f7bef1a3
1fb7b50be7
b57c81ab38
af040e97af
8dc7f119e5
4b4111a802
832f183320
8aa94d5774
48aa6416dc
47ddda1f64
c248ae44d4
9e4545b2fc
8cf3883adc
e06a6ed4c8
12a33f6e2d
6d9638ba31
c54eb00055
72338506ed
78c1d07c4b
143b18af8a
9d39d6ecb3
9686bf0ea8
7aa7be6b24
443c9110f1
ae0ce82609
f1982cb6d8
af62958323
9342ad8d97
5214742d02
178f0c78d8
2487040ae3
5606af5083
4b5a96501d
ededeaed86
636620b1d5
21961f0c32
1fe41146f0
2ad6ef355a
865502ee4f
c7984f3299
7f150ed833
badf4e256c
e64c60bbb3
1780618543
f91fd27624
09e41e8f76
6eeb2107b3
17053ad8b7
fefb4dc1f8
d05b1b3544
82d4904c07
1cdcf33cfa
6616fa835a
7b9a4564b1
fcdefa0620
ef8b3ce639
36870a8f53
b70420951d
1f0c5b4cf1
8648da8111
45b4593563
41b04316cf
e97c6db2a3
896820a349
06c8f468bf
61920e2701
f34ba7ca70
c30ef0895d
aa3a774f73
2c30555b84
743f605773
519c661abb
22c956c75f
13696adc3a
0196571a12
9666f466ab
240e5486c8
8164b6b9cf
4fc82d554f
96b34c0f85
dd5a88dcee
95ed56bf82
1ae80f5ab9
1f0bd3ca6c
a1971f6830
c6118e8898
7ba958cf7f
383905d5d2
6173e3e9ca
3feb7d8922
1d9edbd0dd
d439abdb89
ee47ea0c89
300bb2e627
ccf8593501
0fda612f3f
5afff65b71
7e55bdefce
620cf84d3d
cfe567c62a
cefe12f1df
1e51c39928
42a02bbb80
f1ae6dae4c
6195579910
16c8b23b34
07ae626b22
8d171bb044
6e33ca7e9e
db46e12f2b
868e4b2db8
2e562742c1
68e6958009
ea6e3a7949
b2879ca99f
4e911566c3
9bafda6a15
871a8a5375
0eef74bc00
423ae32097
8282e5d045
19305cdbdf
eb9028ab30
21483f5d07
82dcbac28f
d43bd4625d
ea891324a2
8fd9ea2193
fb02666856
f6f5c2731b
b4e3f771e0
99bb9491ac
0453f21127
9fc09aa4bd
5e87062cf8
3e7a459990
bbf4c03e50
611a3a9753
1611f0d181
08835115e4
2d84e28d32
ef17aae8ab
0cc39f01a3
688d7258f1
4513320bf1
533fd04ef0
dff5681cf0
5a2790a69b
7c5305ccba
4013e8ad6f
d1dfd257f9
5322d735ee
cdb107dcda
be1393a41c
e554c2607f
6215592b12
349cc25433
214d276379
ef24d76adc
ab2b5a691d
c7de2b2801
e8075658ac
4202dabee1
d67db2bcf1
7159ec885f
b5cf734ba9
f7dc8eafee
762ca60a30
e7fb9f342c
.dockerignore (filename inferred from content)

@@ -1,19 +1,12 @@
*
!backend
!configs
!environments-and-requirements
!frontend
!installer
!ldm
!main.py
!scripts
!server
!static
!setup.py

# Guard against pulling in any models that might exist in the directory tree
**/*.pt*

# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml

**/__pycache__
2 .gitattributes vendored

@@ -1,4 +1,4 @@
# Auto normalizes line endings on commit so devs don't need to change local settings.
# Only affects text files and ignores other file types.
# For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
* text=auto
87 .github/workflows/build-cloud-img.yml vendored (file deleted)

@@ -1,87 +0,0 @@
name: Build and push cloud image
on:
  workflow_dispatch:
  # push:
  #   branches:
  #     - main
  #   tags:
  #     - v*
  # # we will NOT push the image on pull requests, only test buildability.
  # pull_request:
  #   branches:
  #     - main

permissions:
  contents: read
  packages: write

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        arch:
          - x86_64
          # requires resolving a patchmatch issue
          # - aarch64
    runs-on: ubuntu-latest
    name: ${{ matrix.arch }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        if: matrix.arch == 'aarch64'

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # see https://github.com/docker/metadata-action
          # will push the following tags:
          # :edge
          # :main (+ any other branches enabled in the workflow)
          # :<tag>
          # :1.2.3 (for semver tags)
          # :1.2 (for semver tags)
          # :<sha>
          tags: |
            type=edge,branch=main
            type=ref,event=branch
            type=ref,event=tag
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
          # suffix image tags with architecture
          flavor: |
            latest=auto
            suffix=-${{ matrix.arch }},latest=true

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      # do not login to container registry on PRs
      - if: github.event_name != 'pull_request'
        name: Docker login
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push cloud image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile.cloud
          platforms: Linux/${{ matrix.arch }}
          # do not push the image on PRs
          push: false
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
69 .github/workflows/build-container.yml vendored

@@ -1,74 +1,43 @@
# Building the Image without pushing to confirm it is still buildable
# confirming functionality would unfortunately need way more resources
name: build container image
on:
  push:
    branches:
      - 'main'
      - 'development'
      - 'update-dockerfile'

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        registry:
          - ghcr.io
        flavor:
          - amd
          - cuda
          # - cloud
        include:
          - flavor: amd
            pip-requirements: requirements-lin-amd.txt
            dockerfile: docker-build/Dockerfile
            platforms: linux/amd64,linux/arm64
          - flavor: cuda
            pip-requirements: requirements-lin-cuda.txt
            dockerfile: docker-build/Dockerfile
            platforms: linux/amd64,linux/arm64
          # - flavor: cloud
          #   pip-requirements: requirements-lin-cuda.txt
          #   dockerfile: docker-build/Dockerfile.cloud
          #   platforms: linux/amd64
        arch:
          - x86_64
          - aarch64
        pip-requirements:
          - requirements-lin-amd.txt
          - requirements-lin-cuda.txt
    runs-on: ubuntu-latest
    name: ${{ matrix.flavor }}
    name: ${{ matrix.pip-requirements }} ${{ matrix.arch }}
    steps:
      - name: prepare docker-tag
        env:
          repository: ${{ github.repository }}
        run: echo "dockertag=${repository,,}" >> $GITHUB_ENV
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ matrix.registry }}/${{ github.repository }}-${{ matrix.flavor }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
          flavor: |
            latest=true

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - if: github.event_name != 'pull_request'
        name: Docker login
        uses: docker/login-action@v2
        with:
          registry: ${{ matrix.registry }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build container
        uses: docker/build-push-action@v3
        with:
          context: .
          file: ${{ matrix.dockerfile }}
          platforms: ${{ matrix.platforms }}
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          file: docker-build/Dockerfile
          platforms: Linux/${{ matrix.arch }}
          push: false
          tags: ${{ env.dockertag }}:${{ matrix.pip-requirements }}-${{ matrix.arch }}
          build-args: pip_requirements=${{ matrix.pip-requirements }}
28 .github/workflows/lint-frontend.yml vendored (file deleted)

@@ -1,28 +0,0 @@
name: Lint frontend

on:
  pull_request:
    paths:
      - 'frontend/**'
  push:
    paths:
      - 'frontend/**'

defaults:
  run:
    working-directory: frontend

jobs:
  lint-frontend:
    runs-on: ubuntu-22.04
    steps:
      - name: Setup Node 18
        uses: actions/setup-node@v3
        with:
          node-version: '18'
      - uses: actions/checkout@v3
      - run: 'yarn install --frozen-lockfile'
      - run: 'yarn tsc'
      - run: 'yarn run madge'
      - run: 'yarn run lint --max-warnings=0'
      - run: 'yarn run prettier --check'
19 .github/workflows/pyflakes.yml vendored (file deleted)

@@ -1,19 +0,0 @@
on:
  pull_request:
  push:
    branches:
      - main
      - development
      - 'release-candidate-*'

jobs:
  pyflakes:
    name: runner / pyflakes
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: pyflakes
        uses: reviewdog/action-pyflakes@v1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          reporter: github-pr-review
58 .github/workflows/test-invoke-conda.yml vendored

@@ -3,29 +3,15 @@ on:
  push:
    branches:
      - 'main'
      - 'development'
      - 'fix-gh-actions-fork'
  pull_request:
    branches:
      - 'main'
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
      - 'converted_to_draft'

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
      - 'development'

jobs:
  fail_if_pull_request_is_draft:
    if: github.event.pull_request.draft == true
    runs-on: ubuntu-22.04
    steps:
      - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
        run: exit 1

  matrix:
    if: github.event.pull_request.draft == false
    strategy:
      matrix:
        stable-diffusion-model:
@@ -34,28 +20,16 @@ jobs:
          - environment-lin-amd.yml
          - environment-lin-cuda.yml
          - environment-mac.yml
          - environment-win-cuda.yml
        include:
          - environment-yaml: environment-lin-amd.yml
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            os: ubuntu-latest
            default-shell: bash -l {0}
          - environment-yaml: environment-lin-cuda.yml
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            os: ubuntu-latest
            default-shell: bash -l {0}
          - environment-yaml: environment-mac.yml
            os: macos-12
            curl-command: curl
            github-env: $GITHUB_ENV
            default-shell: bash -l {0}
          - environment-yaml: environment-win-cuda.yml
            os: windows-2022
            curl-command: curl.exe
            github-env: $env:GITHUB_ENV
            default-shell: pwsh
          - stable-diffusion-model: stable-diffusion-1.5
            stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
@@ -98,15 +72,15 @@ jobs:

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
@@ -122,20 +96,22 @@ jobs:
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" \
            -L ${{ matrix.stable-diffusion-model-url }}

      - name: run configure_invokeai.py
        id: run-preload-models
        run: |
          python scripts/configure_invokeai.py --skip-sd-weights --yes
          python scripts/configure_invokeai.py --no-interactive --yes

      - name: cat invokeai.init
      - name: cat ~/.invokeai
        id: cat-invokeai
        run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init
        run: cat ~/.invokeai

      - name: Run the tests
        id: run-tests
        if: matrix.os != 'windows-2022'
        run: |
          time python scripts/invoke.py \
            --no-patchmatch \
@@ -147,13 +123,11 @@ jobs:

      - name: export conda env
        id: export-conda-env
        if: matrix.os != 'windows-2022'
        run: |
          mkdir -p outputs/img-samples
          conda env export --name ${{ env.CONDA_ENV_NAME }} > ${{ env.INVOKEAI_ROOT }}/outputs/environment-${{ runner.os }}-${{ runner.arch }}.yml
          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml

      - name: Archive results
        if: matrix.os != 'windows-2022'
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
92 .github/workflows/test-invoke-pip.yml vendored

@@ -3,28 +3,14 @@ on:
  push:
    branches:
      - 'main'
      - 'development'
  pull_request:
    branches:
      - 'main'
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
      - 'converted_to_draft'

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
      - 'development'

jobs:
  fail_if_pull_request_is_draft:
    if: github.event.pull_request.draft == true
    runs-on: ubuntu-18.04
    steps:
      - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
        run: exit 1
  matrix:
    if: github.event.pull_request.draft == false
    strategy:
      matrix:
        stable-diffusion-model:
@@ -33,50 +19,35 @@ jobs:
          - requirements-lin-cuda.txt
          - requirements-lin-amd.txt
          - requirements-mac-mps-cpu.txt
          - requirements-win-colab-cuda.txt
        python-version:
          # - '3.9'
          - '3.10'
        include:
          - requirements-file: requirements-lin-cuda.txt
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            os: ubuntu-latest
            default-shell: bash -l {0}
          - requirements-file: requirements-lin-amd.txt
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
            os: ubuntu-latest
            default-shell: bash -l {0}
          - requirements-file: requirements-mac-mps-cpu.txt
            os: macOS-12
            curl-command: curl
            github-env: $GITHUB_ENV
          - requirements-file: requirements-win-colab-cuda.txt
            os: windows-2022
            curl-command: curl.exe
            github-env: $env:GITHUB_ENV
            default-shell: bash -l {0}
          - stable-diffusion-model: stable-diffusion-1.5
            stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
            stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
    name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        shell: ${{ matrix.default-shell }}
    env:
      INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: set INVOKEAI_ROOT Windows
        if: matrix.os == 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}

      - name: set INVOKEAI_ROOT others
        if: matrix.os != 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}

      - name: create models.yaml from example
        run: |
          mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
@@ -84,15 +55,15 @@ jobs:

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV

      - name: create requirements.txt
        run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'
@@ -101,14 +72,14 @@ jobs:
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          # cache: 'pip'
          # cache-dependency-path: ${{ matrix.requirements-file }}
          cache: 'pip'
          cache-dependency-path: ${{ matrix.requirements-file }}

      - name: install dependencies
        run: pip3 install --upgrade pip setuptools wheel
      # - name: install dependencies
      #   run: ${{ env.pythonLocation }}/bin/pip install --upgrade pip setuptools wheel

      - name: install requirements
        run: pip3 install -r '${{ matrix.requirements-file }}'
        run: ${{ env.pythonLocation }}/bin/pip install -r '${{ matrix.requirements-file }}'

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
@@ -124,20 +95,33 @@ jobs:
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" \
            -L ${{ matrix.stable-diffusion-model-url }}

      - name: run configure_invokeai.py
        id: run-preload-models
        run: python3 scripts/configure_invokeai.py --skip-sd-weights --yes
        run: |
          ${{ env.pythonLocation }}/bin/python scripts/configure_invokeai.py --no-interactive --yes

      - name: cat ~/.invokeai
        id: cat-invokeai
        run: cat ~/.invokeai

      - name: Run the tests
        id: run-tests
        if: matrix.os != 'windows-2022'
        run: python3 scripts/invoke.py --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" --outdir="${{ env.INVOKEAI_OUTDIR }}"
        run: |
          time ${{ env.pythonLocation }}/bin/python scripts/invoke.py \
            --no-patchmatch \
            --no-nsfw_checker \
            --model ${{ matrix.stable-diffusion-model }} \
            --from_file ${{ env.TEST_PROMPTS }} \
            --root="${{ env.INVOKEAI_ROOT }}" \
            --outdir="${{ env.INVOKEAI_ROOT }}/outputs"

      - name: Archive results
        id: archive-results
        if: matrix.os != 'windows-2022'
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
12 .gitignore vendored

@@ -6,7 +6,6 @@ models/ldm/stable-diffusion-v1/model.ckpt
# ignore user models config
configs/models.user.yaml
config/models.user.yml
invokeai.init

# ignore the Anaconda/Miniconda installer used while building Docker image
anaconda.sh
@@ -223,11 +222,12 @@ environment.yml
requirements.txt

# source installer files
installer/*zip
installer/install.bat
installer/install.sh
installer/update.bat
installer/update.sh
source_installer/*zip
source_installer/invokeAI
install.bat
install.sh
update.bat
update.sh

# this may be present if the user created a venv
invokeai
@@ -1,4 +1,4 @@
<img src="docs/assets/invoke_ai_banner.png" align="center">

Invoke-AI is a community of software developers, researchers, and user
interface experts who have come together on a voluntary basis to build
@@ -81,4 +81,5 @@ area. Disputes are resolved by open and honest communication.

## Signature

This document has been collectively crafted and approved by the current InvokeAI team members, as of 28 Nov 2022: **lstein** (Lincoln Stein), **blessedcoolant**, **hipsterusername** (Kent Keirsey), **Kyle0654** (Kyle Schouviller), **damian0815**, **mauwii** (Matthias Wild), **Netsvetaev** (Artur Netsvetaev), **psychedelicious**, **tildebyte**, **keturn**, and **ebr** (Eugene Brodsky). Although individuals within the group may hold differing views on particular details and/or their implications, we are all in agreement about its fundamental statements, as well as their significance and importance to this project moving forward.
This document has been collectively crafted and approved by the current InvokeAI team members, as of 28 Nov 2022: **lstein** (Lincoln Stein), **blessedcoolant**, **hipsterusername** (Kent Keirsey), **Kyle0654** (Kyle Schouviller), **damian0815**, **mauwii** (Matthias Wild), **Netsvetaev** (Artur Netsvetaev), **psychedelicious**, **tildebyte**, and **keturn**. Although individuals within the group may hold differing views on particular details and/or their implications, we are all in agreement about its fundamental statements, as well as their significance and importance to this project moving forward.
89 README.md

@@ -1,9 +1,11 @@
<div align="center">

![project logo](docs/assets/invoke_ai_banner.png)

# InvokeAI: A Stable Diffusion Toolkit

_Formerly known as lstein/stable-diffusion_

![project logo](docs/assets/invoke_ai_banner.png)

[![discord badge]][discord link]

[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
@@ -36,33 +38,18 @@ This is a fork of
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, macOS and Linux machines, with
generation process. It runs on Windows, Mac and Linux machines, with
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.

**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

_Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._

# Getting Started with InvokeAI

For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
2. Download the .zip file for your OS (Windows/macOS/Linux).
3. Unzip the file.
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
5. Wait a while, until it is done.
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
8. Type `banana sushi` in the box on the top left and click `Invoke`:
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>

_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help aid diagnose issues faster._

## Table of Contents

@@ -82,13 +69,10 @@ This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

### Hardware Requirements

InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).
#### System

You will need one of the following:
@@ -96,10 +80,6 @@ You will need one of the following:
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.

We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
to render 512x512 images.

#### Memory

- At least 12 GB Main Memory RAM.
@@ -117,12 +97,11 @@ Similarly, specify full-precision mode on Apple M1 hardware.

Precision is auto configured based on the device. If however you encounter
errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `invoke.py` with the `--precision=float32` flag to your initialization command
you can try starting `invoke.py` with the `--precision=float32` flag:

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```
Or by updating your InvokeAI configuration file with this argument.

### Features

@@ -151,7 +130,39 @@

### Latest Changes

For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases)
- v2.0.1 (13 October 2022)
  - fix noisy images at high step count when using k* samplers
  - dream.py script now calls invoke.py module directly rather than
    via a new python process (which could break the environment)

- v2.0.0 (9 October 2022)

  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
    for backward compatibility.
  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
  - img2img runs on all k* samplers
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
  - Support for CodeFormer face reconstruction
  - Support for Textual Inversion on Macintoshes
  - Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
    and "embiggen" upscaling. See the `!fix` command.
  - New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
  - New `--perlin` and `--threshold` options allow you to add and control variation
    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
    and tweaking of previous settings.
  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
  - Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
    New commands added:
    - List command-line history with `!history`
    - Search command-line history with `!search`
    - Clear history with `!clear`
  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
    configure. To switch away from auto use the new flag like `--precision=float32`.

For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.

### Troubleshooting

@@ -161,13 +172,15 @@ problems and other issues.
# Contributing

Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.

To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
cleanup, testing, or code reviews, is very much encouraged to do so. To join, just raise your hand on the InvokeAI
Discord server or discussion board.

If you are unfamiliar with how
to contribute to GitHub projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress, but for now the most
important thing is to **make your pull request against the "development" branch**, and not against
"main". This will help keep public breakage to a minimum and will allow you to propose more radical
changes.

We hope you enjoy using our software as much as we enjoy creating it,
and we hope that some of those of you who are reading this will elect
@@ -186,7 +199,7 @@ their time, hard work and effort.
For support, please use this repository's GitHub Issues tracking service. Feel free to send me an
email if you use and like the script.

Original portions of the software are Copyright (c) 2022
Original portions of the software are Copyright (c) 2020
[Lincoln D. Stein](https://github.com/lstein)

### Further Reading
@@ -21,7 +21,7 @@ This model card focuses on the model associated with the Stable Diffusion model,

# Uses

## Direct Use
The model is intended for research purposes only. Possible research areas and
tasks include

@@ -68,11 +68,11 @@ Using the model to generate content that is cruel to individuals is a misuse of
considerations.

### Bias
While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
which consists of images that are primarily limited to English descriptions.
Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.

@@ -84,7 +84,7 @@ The model developers used the following dataset for training the model:
- LAION-2B (en) and subsets thereof (see next section)

**Training Procedure**
Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training,

- Images are encoded through an encoder, which turns images into latent representations. The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4
- Text prompts are encoded through a ViT-L/14 text-encoder.
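To make the shape arithmetic in the first bullet concrete: with the stated factor f = 8, a 512x512x3 image maps to a 64x64x4 latent. A minimal sketch (the helper below is illustrative only, not from the model card or the InvokeAI code):

```python
# Illustrates the autoencoder shape mapping described above (f = 8).
# latent_shape is a hypothetical helper, named here for illustration.
def latent_shape(height: int, width: int, f: int = 8) -> tuple[int, int, int]:
    """Map an H x W x 3 RGB image to its H/f x W/f x 4 latent shape."""
    assert height % f == 0 and width % f == 0, "H and W must be divisible by f"
    return (height // f, width // f, 4)

print(latent_shape(512, 512))  # -> (64, 64, 4)
```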
@@ -108,12 +108,12 @@ filtered to images with an original size `>= 512x512`, estimated aesthetics scor
- **Batch:** 32 x 8 x 2 x 4 = 2048
- **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant

## Evaluation Results
Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling
steps show the relative improvements of the checkpoints:

![pareto](assets/v1-variants-scores.jpg)

Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores.
## Environmental Impact
@@ -18,12 +18,9 @@ from PIL.Image import Image as ImageType
from uuid import uuid4
from threading import Event

from ldm.generate import Generate
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure
from ldm.invoke.globals import Globals
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend
from ldm.invoke.prompt_parser import split_weighted_subprompts
from ldm.invoke.generator.inpaint import infill_methods

from backend.modules.parameters import parameters_to_command
@@ -40,12 +37,9 @@ args.root_dir = os.path.expanduser(args.root_dir or "..")
if not os.path.isabs(args.outdir):
    args.outdir = os.path.join(args.root_dir, args.outdir)

# normalize the config directory relative to root
if not os.path.isabs(opt.conf):
    opt.conf = os.path.normpath(os.path.join(Globals.root,opt.conf))

class InvokeAIWebServer:
    def __init__(self, generate: Generate, gfpgan, codeformer, esrgan) -> None:
    def __init__(self, generate, gfpgan, codeformer, esrgan) -> None:
        self.host = args.host
        self.port = args.port

@@ -249,16 +243,14 @@ class InvokeAIWebServer:

    def find_frontend(self):
        my_dir = os.path.dirname(__file__)
        # LS: setup.py seems to put the frontend in different places on different systems, so
        # this is fragile and needs to be replaced with a better way of finding the front end.
        for candidate in (os.path.join(my_dir,'..','frontend','dist'),  # pip install -e .
                          os.path.join(my_dir,'../../../../frontend','dist'),  # pip install . (Linux, Mac)
                          os.path.join(my_dir,'../../../frontend','dist'),  # pip install . (Windows)
        for candidate in (os.path.join(my_dir,'..','frontend','dist'),  # pip install -e .
                          os.path.join(my_dir,'../../../../frontend','dist')  # pip install .
                          ):
            if os.path.exists(candidate):
                return candidate
        assert "Frontend files cannot be found. Cannot continue"

    def setup_app(self):
        self.result_url = "outputs/"
        self.init_image_url = "outputs/init-images/"
@@ -298,81 +290,9 @@ class InvokeAIWebServer:
            print(f">> System config requested")
            config = self.get_system_config()
            config["model_list"] = self.generate.model_cache.list_models()
            config["infill_methods"] = infill_methods()
            config["infill_methods"] = infill_methods
            socketio.emit("systemConfig", config)

        @socketio.on('searchForModels')
        def handle_search_models(search_folder: str):
            try:
                if not search_folder:
                    socketio.emit(
                        "foundModels",
                        {'search_folder': None, 'found_models': None},
                    )
                else:
                    search_folder, found_models = self.generate.model_cache.search_models(search_folder)
                    socketio.emit(
                        "foundModels",
                        {'search_folder': search_folder, 'found_models': found_models},
                    )
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")

        @socketio.on("addNewModel")
        def handle_add_model(new_model_config: dict):
            try:
                model_name = new_model_config['name']
                del new_model_config['name']
                model_attributes = new_model_config
                update = False
                current_model_list = self.generate.model_cache.list_models()
                if model_name in current_model_list:
                    update = True

                print(f">> Adding New Model: {model_name}")

                self.generate.model_cache.add_model(
                    model_name=model_name, model_attributes=model_attributes, clobber=True)
                self.generate.model_cache.commit(opt.conf)

                new_model_list = self.generate.model_cache.list_models()
                socketio.emit(
                    "newModelAdded",
                    {"new_model_name": model_name,
                     "model_list": new_model_list, 'update': update},
                )
                print(f">> New Model Added: {model_name}")
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")

        @socketio.on("deleteModel")
        def handle_delete_model(model_name: str):
            try:
                print(f">> Deleting Model: {model_name}")
                self.generate.model_cache.del_model(model_name)
                self.generate.model_cache.commit(opt.conf)
                updated_model_list = self.generate.model_cache.list_models()
                socketio.emit(
                    "modelDeleted",
                    {"deleted_model_name": model_name,
                     "model_list": updated_model_list},
                )
                print(f">> Model Deleted: {model_name}")
            except Exception as e:
                self.socketio.emit("error", {"message": (str(e))})
                print("\n")

                traceback.print_exc()
                print("\n")

        @socketio.on("requestModelChange")
        def handle_set_model(model_name: str):
            try:
@@ -669,11 +589,11 @@ class InvokeAIWebServer:
                pass

            if postprocessing_parameters["type"] == "esrgan":
                progress.set_current_status("common:statusUpscalingESRGAN")
                progress.set_current_status("Upscaling (ESRGAN)")
            elif postprocessing_parameters["type"] == "gfpgan":
                progress.set_current_status("common:statusRestoringFacesGFPGAN")
                progress.set_current_status("Restoring Faces (GFPGAN)")
            elif postprocessing_parameters["type"] == "codeformer":
                progress.set_current_status("common:statusRestoringFacesCodeFormer")
                progress.set_current_status("Restoring Faces (Codeformer)")

            socketio.emit("progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)
@@ -706,7 +626,7 @@ class InvokeAIWebServer:
                    f'{postprocessing_parameters["type"]} is not a valid postprocessing type'
                )

            progress.set_current_status("common:statusSavingImage")
            progress.set_current_status("Saving Image")
            socketio.emit("progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

@@ -855,10 +775,10 @@ class InvokeAIWebServer:
            ).convert("RGBA")

            """
            The outpaint image and mask are pre-cropped by the UI, so the bounding box we pass
            to the generator should be:
            {
                "x": 0,
                "y": 0,
                "width": original_bounding_box["width"],
                "height": original_bounding_box["height"]
@@ -878,7 +798,7 @@ class InvokeAIWebServer:
            )

            """
            Apply the mask to the init image, creating a "mask" image with
            transparency where inpainting should occur. This is the kind of
            mask that prompt2image() needs.
            """
@@ -936,15 +856,15 @@ class InvokeAIWebServer:
            nonlocal progress

            generation_messages = {
                "txt2img": "common:statusGeneratingTextToImage",
                "img2img": "common:statusGeneratingImageToImage",
                "inpainting": "common:statusGeneratingInpainting",
                "outpainting": "common:statusGeneratingOutpainting",
                "txt2img": "Text to Image",
                "img2img": "Image to Image",
                "inpainting": "Inpainting",
                "outpainting": "Outpainting",
            }

            progress.set_current_step(step + 1)
            progress.set_current_status(
                f"{generation_messages[actual_generation_mode]}"
                f"Generating ({generation_messages[actual_generation_mode]})"
            )
            progress.set_current_status_has_steps(True)

@@ -984,13 +904,16 @@ class InvokeAIWebServer:
                },
            )

            if generation_parameters["progress_latents"]:
                image = self.generate.sample_to_lowres_estimated_image(sample)
                (width, height) = image.size
                width *= 8
                height *= 8
                img_base64 = image_to_dataURL(image)
                buffered = io.BytesIO()
                image.save(buffered, format="PNG")
                img_base64 = "data:image/png;base64," + base64.b64encode(
                    buffered.getvalue()
                ).decode("UTF-8")
                self.socketio.emit(
                    "intermediateResult",
                    {
@@ -1008,7 +931,7 @@ class InvokeAIWebServer:
            self.socketio.emit("progressUpdate", progress.to_formatted_dict())
            eventlet.sleep(0)

        def image_done(image, seed, first_seed, attention_maps_image=None):
        def image_done(image, seed, first_seed):
            if self.canceled.is_set():
                raise CanceledException

@@ -1031,7 +954,7 @@ class InvokeAIWebServer:
                    **generation_parameters["bounding_box"],
                )

                progress.set_current_status("common:statusGenerationComplete")
                progress.set_current_status("Generation Complete")

                self.socketio.emit("progressUpdate", progress.to_formatted_dict())
                eventlet.sleep(0)
@@ -1058,7 +981,7 @@ class InvokeAIWebServer:
                    raise CanceledException

                if esrgan_parameters:
                    progress.set_current_status("common:statusUpscaling")
                    progress.set_current_status("Upscaling")
                    progress.set_current_status_has_steps(False)
                    self.socketio.emit("progressUpdate", progress.to_formatted_dict())
                    eventlet.sleep(0)
@@ -1081,9 +1004,9 @@ class InvokeAIWebServer:

                if facetool_parameters:
                    if facetool_parameters["type"] == "gfpgan":
                        progress.set_current_status("common:statusRestoringFacesGFPGAN")
                        progress.set_current_status("Restoring Faces (GFPGAN)")
                    elif facetool_parameters["type"] == "codeformer":
                        progress.set_current_status("common:statusRestoringFacesCodeFormer")
                        progress.set_current_status("Restoring Faces (Codeformer)")

                    progress.set_current_status_has_steps(False)
                    self.socketio.emit("progressUpdate", progress.to_formatted_dict())
@@ -1115,7 +1038,7 @@ class InvokeAIWebServer:
                    ]
                    all_parameters["facetool_type"] = facetool_parameters["type"]

                progress.set_current_status("common:statusSavingImage")
                progress.set_current_status("Saving Image")
                self.socketio.emit("progressUpdate", progress.to_formatted_dict())
                eventlet.sleep(0)

@@ -1162,7 +1085,7 @@ class InvokeAIWebServer:

                if progress.total_iterations > progress.current_iteration:
                    progress.set_current_step(1)
                    progress.set_current_status("common:statusIterationComplete")
                    progress.set_current_status("Iteration complete")
                    progress.set_current_status_has_steps(False)
                else:
                    progress.mark_complete()
@@ -1170,12 +1093,6 @@ class InvokeAIWebServer:
                self.socketio.emit("progressUpdate", progress.to_formatted_dict())
                eventlet.sleep(0)

                parsed_prompt, _ = get_prompt_structure(generation_parameters["prompt"])
                tokens = None if type(parsed_prompt) is Blend else \
                    get_tokens_for_prompt(self.generate.model, parsed_prompt)
                attention_maps_image_base64_url = None if attention_maps_image is None \
                    else image_to_dataURL(attention_maps_image)

                self.socketio.emit(
                    "generationResult",
                    {
@@ -1188,8 +1105,6 @@ class InvokeAIWebServer:
                        "height": height,
                        "boundingBox": original_bounding_box,
                        "generationMode": generation_parameters["generation_mode"],
                        "attentionMaps": attention_maps_image_base64_url,
                        "tokens": tokens,
                    },
                )
                eventlet.sleep(0)
@@ -1201,7 +1116,7 @@ class InvokeAIWebServer:
            self.generate.prompt2image(
                **generation_parameters,
                step_callback=image_progress,
                image_callback=image_done
                image_callback=image_done,
            )

        except KeyboardInterrupt:
@@ -1556,7 +1471,7 @@ class Progress:
        self.total_iterations = (
            generation_parameters["iterations"] if generation_parameters else 1
        )
        self.current_status = "common:statusPreparing"
        self.current_status = "Preparing"
        self.is_processing = True
        self.current_status_has_steps = False
        self.has_error = False
@@ -1586,7 +1501,7 @@ class Progress:
        self.has_error = has_error

    def mark_complete(self):
        self.current_status = "common:statusProcessingComplete"
        self.current_status = "Processing Complete"
        self.current_step = 0
        self.total_steps = 0
        self.current_iteration = 0
@@ -1648,19 +1563,6 @@ def dataURL_to_image(dataURL: str) -> ImageType:
    )
    return image

"""
Converts an image into a base64 image dataURL.
"""

def image_to_dataURL(image: ImageType) -> str:
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    image_base64 = "data:image/png;base64," + base64.b64encode(
        buffered.getvalue()
    ).decode("UTF-8")
    return image_base64


"""
Converts a base64 image dataURL into bytes.
@@ -43,7 +43,7 @@ def get_canvas_generation_mode(
    )

"""
Mask images are white in areas where no change should be made, black where changes
Mask images are white in areas where no change should be made, black where changes
should be made.
"""
Binary file not shown.
@@ -1,17 +0,0 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.

Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.
@@ -31,7 +31,7 @@ stable-diffusion-1.4:
    width: 512
    height: 512
waifu-diffusion-1.3:
    description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
    description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27)
    repo_id: hakurei/waifu-diffusion-v1-3
    config: v1-inference.yaml
    file: model-epoch09-float32.ckpt
@@ -25,5 +25,3 @@ inpainting-1.5:
    config: configs/stable-diffusion/v1-inpainting-inference.yaml
    vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
    description: RunwayML SD 1.5 model optimized for inpainting
    width: 512
    height: 512
@@ -107,4 +107,4 @@ lightning:
    benchmark: True
    max_steps: 4000000
#   max_steps: 4000
@@ -32,7 +32,7 @@ model:
        placeholder_strings: ["*"]
        initializer_words: ['sculpture']
        per_image_tokens: false
        num_vectors_per_token: 1
        num_vectors_per_token: 8
        progressive_words: False

    unet_config:
@@ -107,4 +107,4 @@ lightning:
    benchmark: False
    max_steps: 6200
#   max_steps: 4000
@@ -1,4 +1,4 @@
FROM python:3.10-slim AS builder
FROM ubuntu:22.10

# use bash
SHELL [ "/bin/bash", "-c" ]
@@ -7,59 +7,28 @@ SHELL [ "/bin/bash", "-c" ]
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc=4:10.2.* \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        python3-dev=3.9.* \
        build-essential \
        gcc \
        git \
        libgl1-mesa-glx \
        libglib2.0-0 \
        pip \
        python3 \
        python3-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# set WORKDIR, PATH and copy sources
ARG APPDIR=/usr/src/app
WORKDIR ${APPDIR}
ENV PATH ${APPDIR}/.venv/bin:$PATH
# set workdir and copy sources
WORKDIR /invokeai
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./

# install requirements
RUN python3 -m venv .venv \
    && pip install \
        --upgrade \
        --no-cache-dir \
        'wheel>=0.38.4' \
    && pip install \
        --no-cache-dir \
        -r ${PIP_REQUIREMENTS}
# install requirements and link outputs folder
RUN pip install \
    --no-cache-dir \
    -r ${PIP_REQUIREMENTS}

FROM python:3.10-slim AS runtime

# setup environment
ARG APPDIR=/usr/src/app
WORKDIR ${APPDIR}
COPY --from=builder ${APPDIR} .
ENV \
    PATH=${APPDIR}/.venv/bin:$PATH \
    INVOKEAI_ROOT=/data \
    INVOKE_MODEL_RECONFIGURE=--yes

# Install necessary packages
RUN apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        build-essential=12.9 \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        libopencv-dev=4.5.* \
    && ln -sf \
        /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
        /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
    && python3 -c "from patchmatch import patch_match" \
    && apt-get remove -y \
        --autoremove \
        build-essential \
    && apt-get autoclean \
    && rm -rf /var/lib/apt/lists/*

# set Entrypoint and default CMD
ENTRYPOINT [ "python3", "scripts/invoke.py" ]
# set Environment, Entrypoint and default CMD
ENV INVOKEAI_ROOT /data
ENTRYPOINT [ "python3", "scripts/invoke.py", "--outdir=/data/outputs" ]
CMD [ "--web", "--host=0.0.0.0" ]
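
For orientation, a minimal sketch of how an image built from this Dockerfile might be used; the `invokeai` tag is illustrative, and the trailing arguments mirror the default `CMD` above:

```bash
# build from the repository root (tag name is illustrative)
docker build --file docker-build/Dockerfile --tag invokeai .

# run the web UI; arguments are appended to the invoke.py ENTRYPOINT
docker run --rm --publish 9090:9090 invokeai --web --host=0.0.0.0
```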
@@ -1,86 +0,0 @@
#######################
#### Builder stage ####

FROM library/ubuntu:22.04 AS builder

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt-get install -y \
        git \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip \
        build-essential \
        python3-opencv \
        libopencv-dev

# This is needed for patchmatch support
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
    ln -sf opencv4.pc opencv.pc

ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}

ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m venv ${VIRTUAL_ENV} &&\
    pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
        torch==1.12.0+cu116 \
        torchvision==0.13.0+cu116 &&\
    pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch

COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
    cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
    pip install -r requirements.txt &&\
    pip install -e .


#######################
#### Runtime stage ####

FROM library/ubuntu:22.04 as runtime

ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt install -y --no-install-recommends \
        git \
        curl \
        ncdu \
        iotop \
        bzip2 \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip \
        build-essential \
        python3-opencv \
        libopencv-dev &&\
    apt-get clean && apt-get autoclean

ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}

ENV INVOKEAI_ROOT=/mnt/invokeai
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

COPY --from=builder ${WORKDIR} ${WORKDIR}
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig

# build patchmatch
RUN python -c "from patchmatch import patch_match"

## workaround for non-existent initfile when runtime directory is mounted; see #1613
RUN touch /root/.invokeai

ENTRYPOINT ["bash"]

CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]
@@ -1,44 +0,0 @@
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai

IMAGE=local/invokeai:latest

USER=$(shell id -u)
GROUP=$(shell id -g)

# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.

build:
	DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..

configure:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/configure_invokeai.py"

# Run the container with the runtime dir mounted and the web server exposed on port 9090
web:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		-p 9090:9090 \
		${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"

# Run the cli with the runtime dir mounted
cli:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/invoke.py"

# Run the container with the runtime dir mounted and open a bash shell
shell:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --

.PHONY: build configure web cli shell
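
A sketch of how the (now-removed) targets above fit together; the runtime directory defaults to `${HOME}/invokeai` per the variables at the top of the Makefile:

```bash
make build       # build local/invokeai:latest from Dockerfile.cloud
make configure   # populate the runtime dir with models and configs
make web         # serve the web UI on port 9090
```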
@@ -1,35 +1,49 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
# configure values by using env when executing build.sh f.e. `env ARCH=aarch64 ./build.sh`

source ./docker-build/env.sh \
    || echo "please execute docker-build/build.sh from repository root" \
    || exit 1

PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}

# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t ${DOCKERFILE}"
echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "arch:\t\t ${ARCH}"
echo -e "Platform:\t ${PLATFORM}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
echo "You are using these values:"
echo -e "Dockerfile:\t\t ${dockerfile}"
echo -e "requirements:\t\t ${pip_requirements}"
echo -e "volumename:\t\t ${volumename}"
echo -e "arch:\t\t\t ${arch}"
echo -e "platform:\t\t ${platform}"
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"

if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
    echo -e "Volume already exists\n"
if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
    echo "Volume already exists"
    echo
else
    echo -n "creating docker volume "
    docker volume create "${VOLUMENAME}"
    docker volume create "${volumename}"
fi

# Build Container
docker build \
    --platform="${PLATFORM}" \
    --tag="${INVOKEAI_TAG}" \
    --build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
    --file="${DOCKERFILE}" \
    --platform="${platform}" \
    --tag="${invokeai_tag}" \
    --build-arg="PIP_REQUIREMENTS=${pip_requirements}" \
    --file="${dockerfile}" \
    .

docker run \
    --rm \
    --platform="$platform" \
    --name="$project_name" \
    --hostname="$project_name" \
    --mount="source=$volumename,target=/data" \
    --mount="type=bind,source=$HOME/.huggingface,target=/root/.huggingface" \
    --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
    --entrypoint="python3" \
    "${invokeai_tag}" \
    scripts/configure_invokeai.py --yes
@@ -1,10 +1,13 @@
#!/usr/bin/env bash

# Variables shared by build.sh and run.sh
REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
ARCH=${ARCH:-$(uname -m)}
PLATFORM=${PLATFORM:-Linux/${ARCH}}
CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
INVOKEAI_BRANCH=$(git branch --show)
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH/\//-}}
project_name=${PROJECT_NAME:-invokeai}
volumename=${VOLUMENAME:-${project_name}_data}
arch=${ARCH:-x86_64}
platform=${PLATFORM:-Linux/${arch}}
invokeai_tag=${INVOKEAI_TAG:-${project_name}:${arch}}

export project_name
export volumename
export arch
export platform
export invokeai_tag
@@ -1,31 +1,15 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!

source ./docker-build/env.sh \
    || echo "please run from repository root" \
    || exit 1

# check if HUGGINGFACE_TOKEN is available
# You must have accepted the terms of use for required models
HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}

echo -e "You are using these values:\n"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
source ./docker-build/env.sh || echo "please run from repository root" || exit 1

docker run \
    --interactive \
    --tty \
    --rm \
    --platform="$PLATFORM" \
    --name="${REPOSITORY_NAME,,}" \
    --hostname="${REPOSITORY_NAME,,}" \
    --mount="source=$VOLUMENAME,target=/data" \
    --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
    --platform="$platform" \
    --name="$project_name" \
    --hostname="$project_name" \
    --mount="source=$volumename,target=/data" \
    --publish=9090:9090 \
    --cap-add=sys_nice \
    ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
    "$INVOKEAI_TAG" ${1:+$@}
    "$invokeai_tag" ${1:+$@}
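
`GPU_FLAGS` is optional and only forwarded when set, so GPU passthrough might be requested like this (a sketch):

```bash
# expands to --gpus=all inside the script; omit to run without GPU passthrough
env GPU_FLAGS=all ./docker-build/run.sh
```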
@@ -4,275 +4,180 @@ title: Changelog

# :octicons-log-16: **Changelog**

## v2.2.4 <small>(11 December 2022)</small>

**the `invokeai` directory**

Previously there were two directories to worry about: the directory that
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
directory that contained the model files, embeddings, configuration and
outputs. With the 2.2.4 release, this dual system is done away with, and
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
lives in a directory named `invokeai`. By default this directory is located in
your home directory (e.g. `\Users\yourname` on Windows), but you can select
where it goes at install time.

After installation, you can delete the install directory (the one that the zip
file creates when it unpacks). Do **not** delete or move the `invokeai`
directory!

**Initialization file `invokeai/invokeai.init`**

You can place frequently-used startup options in this file, such as the default
number of steps or your preferred sampler. To keep everything in one place, this
file has now been moved into the `invokeai` directory and is named
`invokeai.init`.

**To update from Version 2.2.3**

The easiest route is to download and unpack one of the 2.2.4 installer files.
When it asks you for the location of the `invokeai` runtime directory, respond
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
and answer "Y" when asked if you want to reuse the directory.

The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
does not know about the new directory layout and won't be fully functional.

**To update to 2.2.5 (and beyond) there's now an update path**

As they become available, you can update to more recent versions of InvokeAI
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
Running it without any arguments will install the most recent version of
InvokeAI. Alternatively, you can get specific releases by running the `update.sh`
script with an argument in the command shell. This syntax accepts the path to
the desired release's zip file, which you can find by clicking on the green
"Code" button on this repository's home page.
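
Roughly, the two update modes look like this (the zip path is a placeholder, not a real release asset):

```bash
./update.sh                              # install the most recent InvokeAI release
./update.sh /path/to/InvokeAI-2.2.5.zip  # install a specific release from its zip file
```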
**Other 2.2.4 Improvements**

- Fix InvokeAI GUI initialization by @addianto in #1687
- fix link in documentation by @lstein in #1728
- Fix broken link by @ShawnZhong in #1736
- Remove reference to binary installer by @lstein in #1731
- documentation fixes for 2.2.3 by @lstein in #1740
- Modify installer links to point closer to the source installer by @ebr in
  #1745
- add documentation warning about 1650/60 cards by @lstein in #1753
- Fix Linux source URL in installation docs by @andybearman in #1756
- Make install instructions discoverable in readme by @damian0815 in #1752
- typo fix by @ofirkris in #1755
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
  in #1765
- stability and usage improvements to binary & source installers by @lstein in
  #1760
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
- invoke script cds to its location before running by @lstein in #1805
- Make PaperCut and VoxelArt models load again by @lstein in #1730
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
  #1817
- Clean up readme by @hipsterusername in #1820
- Optimized Docker build with support for external working directory by @ebr in
  #1544
- disable pushing the cloud container by @mauwii in #1831
- Fix docker push github action and expand with additional metadata by @ebr in
  #1837
- Fix Broken Link To Notebook by @VedantMadane in #1821
- Account for flat models by @spezialspezial in #1766
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
  @SammCheese in #1848
- Make force free GPU memory work in img2img by @addianto in #1844
- New installer by @lstein

## v2.2.3 <small>(2 December 2022)</small>

!!! Note

    This point release removes references to the binary installer from the
    installation guide. The binary installer is not stable at the current
    time. First time users are encouraged to use the "source" installer as
    described in [Installing InvokeAI with the Source Installer](installation/INSTALL_SOURCE.md)

With InvokeAI 2.2, this project now provides enthusiasts and professionals a
robust workflow solution for creating AI-generated and human facilitated
compositions. Additional enhancements have been made as well, improving safety,
ease of use, and installation.

Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
512x768 image (and less for smaller images), and is compatible with
Windows/Linux/Mac (M1 & M2).

You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
introduces the main WebUI enhancement for version 2.2 -
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
potential for users to create and iterate on their creations. The following
sections describe what's new for InvokeAI.

## v2.2.2 <small>(30 November 2022)</small>

!!! note

    The binary installer is not ready for prime time. First time users are recommended to install via the "source" installer accessible through the links at the bottom of this page.

With InvokeAI 2.2, this project now provides enthusiasts and professionals a
robust workflow solution for creating AI-generated and human facilitated
compositions. Additional enhancements have been made as well, improving safety,
ease of use, and installation.

Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
512x768 image (and less for smaller images), and is compatible with
Windows/Linux/Mac (M1 & M2).

You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
introduces the main WebUI enhancement for version 2.2 -
[The Unified Canvas](https://invoke-ai.github.io/InvokeAI/features/UNIFIED_CANVAS/).
This new workflow is the biggest enhancement added to the WebUI to date, and
unlocks a stunning amount of potential for users to create and iterate on their
creations. The following sections describe what's new for InvokeAI.

## v2.2.0 <small>(2 December 2022)</small>

With InvokeAI 2.2, this project now provides enthusiasts and professionals a
robust workflow solution for creating AI-generated and human facilitated
compositions. Additional enhancements have been made as well, improving safety,
ease of use, and installation.

Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
512x768 image (and less for smaller images), and is compatible with
Windows/Linux/Mac (M1 & M2).

You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
introduces the main WebUI enhancement for version 2.2 -
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
potential for users to create and iterate on their creations. The following
sections describe what's new for InvokeAI.

## v2.1.3 <small>(13 November 2022)</small>

- A choice of installer scripts that automate installation and configuration.
  See
  [Installation](installation/index.md).
- A streamlined manual installation process that works for both Conda and
  PIP-only installs. See
  [Manual Installation](installation/INSTALL_MANUAL.md).
- The ability to save frequently-used startup options (model to load, steps,
  sampler, etc) in a `.invokeai` file. See
  [Client](features/CLI.md)
- Support for AMD GPU cards (non-CUDA) on Linux machines.
- Multiple bugs and edge cases squashed.

## v2.1.0 <small>(2 November 2022)</small>

- update mac instructions to use invokeai for env name by @willwillems in #1030
- Update .gitignore by @blessedcoolant in #1040
- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
- Print out the device type which is used by @manzke in #1073
- Hires Addition by @hipsterusername in #1063
- update mac instructions to use invokeai for env name by @willwillems in
  https://github.com/invoke-ai/InvokeAI/pull/1030
- Update .gitignore by @blessedcoolant in
  https://github.com/invoke-ai/InvokeAI/pull/1040
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
  missing after merge by @skurovec in
  https://github.com/invoke-ai/InvokeAI/pull/1056
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
  https://github.com/invoke-ai/InvokeAI/pull/1060
- Print out the device type which is used by @manzke in
  https://github.com/invoke-ai/InvokeAI/pull/1073
- Hires Addition by @hipsterusername in
  https://github.com/invoke-ai/InvokeAI/pull/1063
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
  @skurovec in #1081
  @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
- Forward dream.py to invoke.py using the same interpreter, add deprecation
  warning by @db3000 in #1077
- fix noisy images at high step counts by @lstein in #1086
- Generalize facetool strength argument by @db3000 in #1078
  warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
- fix noisy images at high step counts by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1086
- Generalize facetool strength argument by @db3000 in
  https://github.com/invoke-ai/InvokeAI/pull/1078
- Enable fast switching among models at the invoke> command line by @lstein in
  #1066
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
- Update generate.py by @unreleased in #1109
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in #1125
- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
- Fix broken doc links, fix malaprop in the project subtitle by @majick in #1131
- Only output facetool parameters if enhancing faces by @db3000 in #1119
  https://github.com/invoke-ai/InvokeAI/pull/1066
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
  https://github.com/invoke-ai/InvokeAI/pull/1095
- Update generate.py by @unreleased in
  https://github.com/invoke-ai/InvokeAI/pull/1109
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in
  https://github.com/invoke-ai/InvokeAI/pull/1125
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
  https://github.com/invoke-ai/InvokeAI/pull/1123
- Fix broken doc links, fix malaprop in the project subtitle by @majick in
  https://github.com/invoke-ai/InvokeAI/pull/1131
- Only output facetool parameters if enhancing faces by @db3000 in
  https://github.com/invoke-ai/InvokeAI/pull/1119
- Update gitignore to ignore codeformer weights at new location by
  @spezialspezial in #1136
- fix links to point to invoke-ai.github.io #1117 by @mauwii in #1143
- Rework-mkdocs by @mauwii in #1144
  @spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1136
- fix links to point to invoke-ai.github.io #1117 by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1143
- Rework-mkdocs by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1144
- add option to CLI and pngwriter that allows user to set PNG compression level
  by @lstein in #1127
- Fix img2img DDIM index out of bound by @wfng92 in #1137
- Fix gh actions by @mauwii in #1128
- update mac instructions to use invokeai for env name by @willwillems in #1030
- Update .gitignore by @blessedcoolant in #1040
- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
- Print out the device type which is used by @manzke in #1073
- Hires Addition by @hipsterusername in #1063
  by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
- Fix img2img DDIM index out of bound by @wfng92 in
  https://github.com/invoke-ai/InvokeAI/pull/1137
- Fix gh actions by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1128
- update mac instructions to use invokeai for env name by @willwillems in
  https://github.com/invoke-ai/InvokeAI/pull/1030
- Update .gitignore by @blessedcoolant in
  https://github.com/invoke-ai/InvokeAI/pull/1040
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
  missing after merge by @skurovec in
  https://github.com/invoke-ai/InvokeAI/pull/1056
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
  https://github.com/invoke-ai/InvokeAI/pull/1060
- Print out the device type which is used by @manzke in
  https://github.com/invoke-ai/InvokeAI/pull/1073
- Hires Addition by @hipsterusername in
  https://github.com/invoke-ai/InvokeAI/pull/1063
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
  @skurovec in #1081
  @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
- Forward dream.py to invoke.py using the same interpreter, add deprecation
  warning by @db3000 in #1077
- fix noisy images at high step counts by @lstein in #1086
- Generalize facetool strength argument by @db3000 in #1078
  warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
- fix noisy images at high step counts by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1086
- Generalize facetool strength argument by @db3000 in
  https://github.com/invoke-ai/InvokeAI/pull/1078
- Enable fast switching among models at the invoke> command line by @lstein in
  #1066
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
- Only output facetool parameters if enhancing faces by @db3000 in #1119
  https://github.com/invoke-ai/InvokeAI/pull/1066
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
  https://github.com/invoke-ai/InvokeAI/pull/1095
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
  https://github.com/invoke-ai/InvokeAI/pull/1123
- Only output facetool parameters if enhancing faces by @db3000 in
  https://github.com/invoke-ai/InvokeAI/pull/1119
- add option to CLI and pngwriter that allows user to set PNG compression level
  by @lstein in #1127
- Fix img2img DDIM index out of bound by @wfng92 in #1137
- Add text prompt to inpaint mask support by @lstein in #1133
  by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
- Fix img2img DDIM index out of bound by @wfng92 in
  https://github.com/invoke-ai/InvokeAI/pull/1137
- Add text prompt to inpaint mask support by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1133
- Respect http[s] protocol when making socket.io middleware by @damian0815 in
  #976
- WebUI: Adds Codeformer support by @psychedelicious in #1151
- Skips normalizing prompts for web UI metadata by @psychedelicious in #1165
- Add Asymmetric Tiling by @carson-katri in #1132
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in #1172
  https://github.com/invoke-ai/InvokeAI/pull/976
- WebUI: Adds Codeformer support by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1151
- Skips normalizing prompts for web UI metadata by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1165
- Add Asymmetric Tiling by @carson-katri in
  https://github.com/invoke-ai/InvokeAI/pull/1132
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1172
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious
  in #1175
  in https://github.com/invoke-ai/InvokeAI/pull/1175
- Flips channels using array slicing instead of using OpenCV by @psychedelicious
  in #1178
- Fix typo in docs: s/Formally/Formerly by @noodlebox in #1176
- fix clipseg loading problems by @lstein in #1177
- Correct color channels in upscale using array slicing by @wfng92 in #1181
  in https://github.com/invoke-ai/InvokeAI/pull/1178
- Fix typo in docs: s/Formally/Formerly by @noodlebox in
  https://github.com/invoke-ai/InvokeAI/pull/1176
- fix clipseg loading problems by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1177
- Correct color channels in upscale using array slicing by @wfng92 in
  https://github.com/invoke-ai/InvokeAI/pull/1181
- Web UI: Filters existing images when adding new images; Fixes #1085 by
  @psychedelicious in #1171
- fix a number of bugs in textual inversion by @lstein in #1190
- Improve !fetch, add !replay command by @ArDiouscuros in #882
- Fix generation of image with s>1000 by @holstvoogd in #951
- Web UI: Gallery improvements by @psychedelicious in #1198
- Update CLI.md by @krummrey in #1211
- outcropping improvements by @lstein in #1207
- add support for loading VAE autoencoders by @lstein in #1216
- remove duplicate fix_func for MPS by @wfng92 in #1210
- Metadata storage and retrieval fixes by @lstein in #1204
- nix: add shell.nix file by @Cloudef in #1170
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in #1185
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in #1187
  @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1171
- fix a number of bugs in textual inversion by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1190
- Improve !fetch, add !replay command by @ArDiouscuros in
  https://github.com/invoke-ai/InvokeAI/pull/882
- Fix generation of image with s>1000 by @holstvoogd in
  https://github.com/invoke-ai/InvokeAI/pull/951
- Web UI: Gallery improvements by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1198
- Update CLI.md by @krummrey in https://github.com/invoke-ai/InvokeAI/pull/1211
- outcropping improvements by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1207
- add support for loading VAE autoencoders by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1216
- remove duplicate fix_func for MPS by @wfng92 in
  https://github.com/invoke-ai/InvokeAI/pull/1210
- Metadata storage and retrieval fixes by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1204
- nix: add shell.nix file by @Cloudef in
  https://github.com/invoke-ai/InvokeAI/pull/1170
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1185
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1187
- Allow user to generate images with initial noise as on M1 / mps system by
  @ArDiouscuros in #981
- feat: adding filename format template by @plucked in #968
- Web UI: Fixes broken bundle by @psychedelicious in #1242
- Support runwayML custom inpainting model by @lstein in #1243
- Update IMG2IMG.md by @talitore in #1262
  @ArDiouscuros in https://github.com/invoke-ai/InvokeAI/pull/981
- feat: adding filename format template by @plucked in
  https://github.com/invoke-ai/InvokeAI/pull/968
- Web UI: Fixes broken bundle by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1242
- Support runwayML custom inpainting model by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1243
- Update IMG2IMG.md by @talitore in
  https://github.com/invoke-ai/InvokeAI/pull/1262
- New dockerfile - including a build- and a run- script as well as a GH-Action
  by @mauwii in #1233
  by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1233
- cut over from karras to model noise schedule for higher steps by @lstein in
  #1222
- Prompt tweaks by @lstein in #1268
- Outpainting implementation by @Kyle0654 in #1251
- fixing aspect ratio on hires by @tjennings in #1249
- Fix-build-container-action by @mauwii in #1274
- handle all unicode characters by @damian0815 in #1276
- adds models.user.yml to .gitignore by @JakeHL in #1281
- remove debug branch, set fail-fast to false by @mauwii in #1284
- Protect-secrets-on-pr by @mauwii in #1285
- Web UI: Adds initial inpainting implementation by @psychedelicious in #1225
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in #1289
- Use proper authentication to download model by @mauwii in #1287
- Prevent indexing error for mode RGB by @spezialspezial in #1294
  https://github.com/invoke-ai/InvokeAI/pull/1222
- Prompt tweaks by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1268
- Outpainting implementation by @Kyle0654 in
  https://github.com/invoke-ai/InvokeAI/pull/1251
- fixing aspect ratio on hires by @tjennings in
  https://github.com/invoke-ai/InvokeAI/pull/1249
- Fix-build-container-action by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1274
- handle all unicode characters by @damian0815 in
  https://github.com/invoke-ai/InvokeAI/pull/1276
- adds models.user.yml to .gitignore by @JakeHL in
  https://github.com/invoke-ai/InvokeAI/pull/1281
- remove debug branch, set fail-fast to false by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1284
- Protect-secrets-on-pr by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1285
- Web UI: Adds initial inpainting implementation by @psychedelicious in
  https://github.com/invoke-ai/InvokeAI/pull/1225
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1289
- Use proper authentication to download model by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1287
- Prevent indexing error for mode RGB by @spezialspezial in
  https://github.com/invoke-ai/InvokeAI/pull/1294
- Integrate sd-v1-5 model into test matrix (easily expandable), remove
  unecesarry caches by @mauwii in #1293
- add --no-interactive to configure_invokeai step by @mauwii in #1302
  unecesarry caches by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1293
- add --no-interactive to preload_models step by @mauwii in
  https://github.com/invoke-ai/InvokeAI/pull/1302
- 1-click installer and updater. Uses micromamba to install git and conda into a
  contained environment (if necessary) before running the normal installation
  script by @cmdr2 in #1253
- configure_invokeai.py script downloads the weight files by @lstein in #1290
  script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
- preload_models.py script downloads the weight files by @lstein in
  https://github.com/invoke-ai/InvokeAI/pull/1290

## v2.0.1 <small>(13 October 2022)</small>
@@ -1,5 +1,5 @@
---
title: Command-Line Interface
title: CLI
---

# :material-bash: CLI
@@ -130,34 +130,20 @@ file should contain the startup options as you would type them on the
command line (`--steps=10 --grid`), one argument per line, or a
mixture of both using any of the accepted command switch formats:

!!! example "my unmodified initialization file"
!!! example ""

    ```bash title="~/.invokeai" linenums="1"
    # InvokeAI initialization file
    # This is the InvokeAI initialization file, which contains command-line default values.
    # Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
    # or renaming it and then running configure_invokeai.py again.

    # The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
    --root="/Users/mauwii/invokeai"

    # the --outdir option controls the default location of image files.
    --outdir="/Users/mauwii/invokeai/outputs"

    # You may place other frequently-used startup commands here, one or more per line.
    # Examples:
    # --web --host=0.0.0.0
    # --steps=20
    # -Ak_euler_a -C10.0
    ```bash
    --web
    --steps=28
    --grid
    -f 0.6 -C 11.0 -A k_euler_a
    ```

!!! note

    The initialization file only accepts the command line arguments.
    There are additional arguments that you can provide on the `invoke>` command
    line (such as `-n` or `--iterations`) that cannot be entered into this file.
    Also be alert for empty blank lines at the end of the file, which will cause
    an arguments error at startup time.
Note that the initialization file only accepts the command line arguments.
There are additional arguments that you can provide on the `invoke>` command
line (such as `-n` or `--iterations`) that cannot be entered into this file.
Also be alert for empty blank lines at the end of the file, which will cause
an arguments error at startup time.
## List of prompt arguments

@@ -209,17 +195,15 @@ Here are the invoke> command that apply to txt2img:
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
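
For instance, `--save_intermediates` from the table above can be combined with an ordinary prompt (the prompt here is hypothetical):

```bash
# save the in-progress image from every 10th step into the "intermediates" folder
invoke> a sunlit mountain lake -s 50 --save_intermediates 10
```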
!!! note

    Note that the width and height of the image must be multiples of 64. You can
    provide different values, but they will be rounded down to the nearest multiple
    of 64.

the width and height of the image must be multiples of 64. You can
provide different values, but they will be rounded down to the nearest multiple
of 64.
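
As a concrete illustration of the rounding rule (the prompt is hypothetical):

```bash
# 600 is not a multiple of 64, so the image is generated 576 pixels wide (9 x 64)
invoke> a quiet harbor at dusk -W600 -H512
```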
### This is an example of img2img:

!!! example "This is an example of img2img"

    ```bash
    invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
    ```
```
invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
```

This will modify the indicated vacation photograph by making it more like the
prompt. Results will vary greatly depending on what is in the image. We also ask
@@ -269,7 +253,7 @@ description of the part of the image to replace. For example, if you have an
image of a breakfast plate with a bagel, toast and scrambled eggs, you can
selectively mask the bagel and replace it with a piece of cake this way:

```bash
```
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
```
@@ -281,7 +265,7 @@ are getting too much or too little masking you can adjust the threshold down (to
get more mask), or up (to get less). In this example, by passing `-tm` a higher
value, we are insisting on a more stringent classification.

```bash
```
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
```
@@ -291,16 +275,16 @@ You can load and use hundreds of community-contributed Textual
Inversion models just by typing the appropriate trigger phrase. Please
see [Concepts Library](CONCEPTS.md) for more details.

## Other Commands
# Other Commands

The CLI offers a number of commands that begin with "!".

### Postprocessing images
## Postprocessing images

To postprocess a file using face restoration or upscaling, use the `!fix`
command.

#### `!fix`
### `!fix`

This command runs a post-processor on a previously-generated image. It takes a
PNG filename or path and applies your choice of the `-U`, `-G`, or `--embiggen`
@@ -327,19 +311,19 @@ Some examples:
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
```
#### `!mask`
### !mask

This command takes an image and a text prompt, and uses the `clipseg` algorithm to
automatically generate a mask of the area that matches the text prompt. It is
useful for debugging the text masking process prior to inpainting with the
`--text_mask` argument. See [INPAINTING.md] for details.
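
A hypothetical invocation, assuming `!mask` takes the image path together with the same `-tm` prompt syntax used for inpainting:

```bash
invoke> !mask /path/to/breakfast.png -tm bagel
```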
### Model selection and importation
## Model selection and importation

The CLI allows you to add new models on the fly, as well as to switch among them
rapidly without leaving the script.

#### `!models`
### !models

This prints out a list of the models defined in `config/models.yaml`. The active
model is bold-faced.
@@ -352,7 +336,7 @@ laion400m not loaded <no description>
waifu-diffusion not loaded Waifu Diffusion v1.3
</pre>

#### `!switch <model>`
### !switch <model>

This quickly switches from one model to another without leaving the CLI script.
`invoke.py` uses a memory caching system; once a model has been loaded,
@@ -377,7 +361,7 @@ invoke> !switch waifu-diffusion
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
>> Model loaded in 18.24s
>> Max VRAM used to load the model: 2.17G
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage:2.17G
>> Setting Sampler to k_lms
@@ -397,7 +381,7 @@ laion400m not loaded <no description>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>

#### `!import_model <path/to/model/weights>`
### !import_model <path/to/model/weights>

This command imports a new model weights file into InvokeAI, makes it available
for image generation within the script, and writes out the configuration for the
@@ -444,10 +428,10 @@ OK to import [n]? <b>y</b>
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
invoke>
invoke>
</pre>
#### `!edit_model <name_of_model>`
###!edit_model <name_of_model>

The `!edit_model` command can be used to modify a model that is already defined
in `config/models.yaml`. Call it with the short name of the model you wish to
@@ -484,12 +468,12 @@ text... Outputs: [2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix
"outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512
-H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25 ```
### History processing
## History processing

The CLI provides a series of convenient commands for reviewing previous actions,
retrieving them, modifying them, and re-running them.

#### `!history`
### !history

The invoke script keeps track of all the commands you issue during a session,
allowing you to re-run them. On Mac and Linux systems, it also writes the
@@ -501,22 +485,20 @@ during the session (Windows), or the most recent 1000 commands (Mac|Linux). You
can then repeat a command by using the command `!NNN`, where "NNN" is the
history line number. For example:

!!! example ""

    ```bash
    invoke> !history
    ...
    [14] happy woman sitting under tree wearing broad hat and flowing garment
    [15] beautiful woman sitting under tree wearing broad hat and flowing garment
    [18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
    [20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
    [21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
    ...
    invoke> !20
    invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
    ```

```bash
invoke> !history
...
[14] happy woman sitting under tree wearing broad hat and flowing garment
[15] beautiful woman sitting under tree wearing broad hat and flowing garment
[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
...
invoke> !20
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
```
####`!fetch`
### !fetch

This command retrieves the generation parameters from a previously generated
image and either loads them into the command line (Linux|Mac), or prints them
@@ -526,36 +508,33 @@ a folder with image png files, and wildcard \*.png to retrieve the dream command
used to generate the images, and save them to a file commands.txt for further
processing.

!!! example "load the generation command for a single png file"
This example loads the generation command for a single png file:

    ```bash
    invoke> !fetch 0000015.8929913.png
    # the script returns the next line, ready for editing and running:
    invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
    ```

```bash
invoke> !fetch 0000015.8929913.png
# the script returns the next line, ready for editing and running:
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
```

!!! example "fetch the generation commands from a batch of files and store them into `selected.txt`"
This one fetches the generation commands from a batch of files and stores them
into `selected.txt`:

    ```bash
    invoke> !fetch outputs\selected-imgs\*.png selected.txt
    ```

```bash
invoke> !fetch outputs\selected-imgs\*.png selected.txt
```
#### `!replay`
### !replay

This command replays a text file generated by !fetch or created manually.

!!! example

    ```
    invoke> !replay outputs\selected-imgs\selected.txt
    ```

```bash
invoke> !replay outputs\selected-imgs\selected.txt
```

Note that these commands may behave unexpectedly if given a PNG file that was
not generated by InvokeAI.

!!! note

    These commands may behave unexpectedly if given a PNG file that was
    not generated by InvokeAI.
#### `!search <search string>`
### !search <search string>

This is similar to !history but it only returns lines that contain
`search string`. For example:
@@ -565,7 +544,7 @@ invoke> !search surreal
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
```

#### `!clear`
### `!clear`

This clears the search history from memory and disk. Be advised that this
operation is irreversible and does not issue any warnings!
@ -1,129 +1,130 @@
|
||||
---
|
||||
title: Concepts Library
|
||||
title: The Hugging Face Concepts Library and Importing Textual Inversion files
|
||||
---
|
||||
|
||||
# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
|
||||
# :material-file-document: Concepts Library
|
||||
|
||||
## Using Textual Inversion Files

Textual inversion (TI) files are small models that customize the output of
Stable Diffusion image generation. They can augment SD with specialized subjects
and artistic styles. They are also known as "embeds" in the machine learning
world.

Each TI file introduces one or more vocabulary terms to the SD model. These are
known in InvokeAI as "triggers." Triggers are often, but not always, denoted
using angle brackets as in "<trigger-phrase>". The two most common types of
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
different TI training packages. InvokeAI supports both formats, but its
[built-in TI training system](TEXTUAL_INVERSION.md) produces `.pt`.

The [Hugging Face company](https://huggingface.co/sd-concepts-library) has
amassed a large library of >800 community-contributed TI files covering a
broad range of subjects and styles. InvokeAI has built-in support for this
library which downloads and merges TI files automatically upon request. You can
also install your own or others' TI files by placing them in a designated
directory.
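Once a TI file is installed, its trigger is used inside an ordinary prompt. As a
brief sketch (the trigger and settings below are placeholders borrowed from the
examples later on this page, not required values):

```bash
invoke> "a Japanese gardener tending a rock garden <ghibli-face>" -s 30 -W 512 -H 512
```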
### An Example

Here are a few examples to illustrate how it works. All these images were
generated using the command-line client and the Stable Diffusion 1.5 model:

| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
| ![](../assets/concepts/image1.png) | ![](../assets/concepts/image2.png) | ![](../assets/concepts/image3.png) | ![](../assets/concepts/image4.png) |
You can also combine styles and concepts:

<figure markdown>
| A portrait of <alf> in <cartoona-animal> style |
| :--------------------------------------------------------: |
| ![](../assets/concepts/image5.png) |
</figure>
## Using a Hugging Face Concept

!!! warning "Authenticating to HuggingFace"

    Some concepts require valid authentication to HuggingFace. Without it, they will not be downloaded
    and will be silently ignored.

    If you used an installer to install InvokeAI, you may have already set a HuggingFace token.
    If you skipped this step, you can:

    - run the InvokeAI configuration script again (if you used a manual installer): `scripts/configure_invokeai.py`
    - set one of the `HUGGINGFACE_TOKEN` or `HUGGING_FACE_HUB_TOKEN` environment variables to contain your token

    Finally, if you already used any HuggingFace library on your computer, you might already have a token
    in your local cache. Check for a hidden `.huggingface` directory in your home folder. If it
    contains a `token` file, then you are all set.
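As a sketch of the environment-variable route on Linux or macOS (substitute your
actual token for the placeholder value):

```bash
# Either variable name listed above works; the value is your personal HuggingFace token.
export HUGGINGFACE_TOKEN=<your-token-here>
python scripts/invoke.py
```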
Hugging Face TI concepts are downloaded and installed automatically as you
require them. This requires your machine to be connected to the Internet. To
find out what each concept is for, you can browse the
[Hugging Face concepts library](https://huggingface.co/sd-concepts-library) and
look at examples of what each concept produces.

When you have an idea of a concept you wish to try, go to the command-line
client (CLI) and type a `<` character and the beginning of the Hugging Face
concept name you wish to load. Press ++tab++, and the CLI will show you all
matching concepts. You can also type `<` and hit ++tab++ to get a listing of all
~800 concepts, but be prepared to scroll up to see them all! If there is more
than one match you can continue to type and ++tab++ until the concept is
completed.

!!! example

    If you type in `<x` and hit ++tab++, you'll be prompted with the completions:

    ```py
    <xatu2> <xatu> <xbh> <xi> <xidiversity> <xioboma> <xuna> <xyz>
    ```

    Now type `id` and press ++tab++. It will be autocompleted to `<xidiversity>`
    because this is a unique match.

Finish your prompt and generate as usual. You may include multiple concept terms
in the prompt.

If you have never used this concept before, you will see a message that the TI
model is being downloaded and installed. After this, the concept will be saved
locally (in the `models/sd-concepts-library` directory) for future use.

Several steps happen during downloading and installation, including a scan of
the file for malicious code. Should any errors occur, you will be warned and the
concept will fail to load. Generation will then continue treating the trigger
term as a normal string of characters (e.g. as literal `<ghibli-face>`).

You can also use `<concept-names>` in the WebGUI's prompt textbox. There is no
autocompletion at this time.
## Installing your Own TI Files

You may install any number of `.pt` and `.bin` files simply by copying them into
the `embeddings` directory of the InvokeAI runtime directory (usually `invokeai`
in your home directory). You may create subdirectories in order to organize the
files in any way you wish. Be careful not to overwrite one file with another.
For example, TI files generated by the Hugging Face toolkit share the name
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
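For example, a layout like the following keeps two identically-named
`learned_embedding.bin` files apart (the subdirectory names are illustrative
only):

```
invokeai/embeddings/
├── hoi4-leaders/
│   └── learned_embedding.bin
└── princess-knight/
    └── learned_embedding.bin
```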
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
files it finds there. At startup you will see a message similar to this one:

```bash
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
```

Note the `*` trigger term. This is a placeholder term that many early TI
tutorials taught people to use rather than a more descriptive term.
Unfortunately, if you have multiple TI files that all use this term, only the
first one loaded will be triggered by use of the term.

To avoid this problem, you can use the `merge_embeddings.py` script to merge two
or more TI files together. If it encounters a collision of terms, the script
will prompt you to select new terms that do not collide. See
[Textual Inversion](TEXTUAL_INVERSION.md) for details.
## Further Reading

---

stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:
!!! example ""
|
||||
```commandline
|
||||
tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
|
||||
```
|
||||
|
||||
```commandline
|
||||
tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
|
||||
```
|
||||
This will take the original image shown here:
|
||||
|
||||
<figure markdown>
|
||||
<figure markdown>
|
||||
{ width=320 }
|
||||
</figure>
|
||||
|
||||
| original image | generated image |
|
||||
| :------------: | :-------------: |
|
||||
| { width=320 } | { width=320 } |
|
||||
and generate a new image based on it as shown here:
|
||||
|
||||
</figure>
|
||||
<figure markdown>
|
||||
{ width=320 }
|
||||
</figure>
|
||||
|
||||
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
|
||||
(`-f`) controls how much the original will be modified, ranging from `0.0` (keep
|
||||
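For example, the sketch above can be rerun at two different strengths; only the
`-f` value changes (a hedged illustration, with the low value hugging the sketch
and the high value departing freely from it):

```bash
invoke> tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.4
invoke> tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.9
```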
from a prompt. If the step count is 10, then the "latent space" (Stable
Diffusion's internal representation of the image) for the prompt "fire" with
seed `1592514025` develops something like this:

!!! example ""

    ```bash
    invoke> "fire" -s10 -W384 -H384 -S1592514025
    ```

<figure markdown>
![latent steps](../assets/img2img/000019.steps.png){ width=720 }
</figure>

Put simply: starting from a frame of fuzz/static, SD finds details in each frame
that it thinks look like "fire" and brings them a little bit more into focus,
into the sequence at the appropriate point, with just the right amount of noise.

### A concrete example

!!! example "I want SD to draw a fire based on this hand-drawn image"

    ![drawing of a fireplace](../assets/img2img/fire-drawing.png){ align=left }

Let's only do 10 steps, to make it easier to see what's happening. If strength
is `0.7`, this is what the internal steps the algorithm has to take will look
like:

<figure markdown>
![gravity32](../assets/img2img/000032.steps.gravity.png)
</figure>

With strength `0.4`, the steps look more like this:

<figure markdown>
![gravity30](../assets/img2img/000030.steps.gravity.png)
</figure>

Notice how much more fuzzy the starting image is for strength `0.7` compared to
`0.4`, and notice also how much longer the sequence is with `0.7`:
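The arithmetic behind those two sequence lengths can be written down directly.
The snippet below only mirrors the description in this section (and the
`(1-strength)` rule discussed under prompt2prompt later in these docs); it is an
illustration of the relationship, not InvokeAI's actual implementation:

```python
# How strength (-f) truncates a 10-step denoising schedule.
steps = 10

for strength in (0.7, 0.4):
    denoise_steps = round(steps * strength)  # steps img2img actually runs
    entry_point = steps - denoise_steps      # where the noised init image enters the schedule
    print(f"strength {strength}: enters at step {entry_point}, runs {denoise_steps}/{steps} steps")
```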
---

when filling in missing regions. It has an almost uncanny ability to blend the
new regions with existing ones in a semantically coherent way.

To install the inpainting model, follow the
[instructions](../installation/INSTALLING_MODELS.md) for installing a new model.
You may use either the CLI (`invoke.py` script) or directly edit the
`configs/models.yaml` configuration file to do this. The main thing to watch out
for is that the model `config` option must be set up to use
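For orientation, an inpainting stanza in `configs/models.yaml` might look
roughly like the sketch below. The stanza name, paths, and config file here are
assumptions based on a typical install, not authoritative values; verify them
against your own `models.yaml` rather than copying blindly:

```yaml
inpainting-1.5:
  description: RunwayML SD 1.5 model optimized for inpainting
  weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
  config: configs/stable-diffusion/v1-inpainting-inference.yaml
  width: 512
  height: 512
```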
A number of caveats:

(`--iterations`) argument.

3. Your results will be _much_ better if you use the `inpaint-1.5` model
released by runwayML and installed by default by `scripts/configure_invokeai.py`.
This model was trained specifically to harmoniously fill in image gaps. The
standard model will work as well, but you may notice color discontinuities at
the border.
---

should "just work" without further intervention. Simply pass the `--upscale`
(`-U`) option on the `invoke>` command line, or indicate the desired scale on
the popup in the Web GUI.

**GFPGAN** requires a series of downloadable model files to work. These are
loaded when you run `scripts/configure_invokeai.py`. If GFPGAN is failing with an
error, please run the following from the InvokeAI directory:

```bash
python scripts/configure_invokeai.py
```

If you do not run this script in advance, the GFPGAN module will attempt to
download the model files the first time you try to perform facial
reconstruction.

## Usage

You will now have access to two new prompt arguments.

### Upscaling

`-U : <upscaling_factor> <upscaling_strength>`
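For instance, the following hedged invocation upscales the result 2x at 75%
strength; the prompt is arbitrary:

```bash
invoke> "a portrait photograph of a woman" -s 50 -U 2 0.75
```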
This repo also allows you to perform face restoration using
[CodeFormer](https://github.com/sczhou/CodeFormer).

In order to set up CodeFormer to work, you need to download the models like with
GFPGAN. You can do this either by running `configure_invokeai.py` or by manually
downloading the
[model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
and saving it to `ldm/invoke/restoration/codeformer/weights` folder.
You can use the `-ft` prompt argument to swap between CodeFormer and the default
GFPGAN. The above mentioned `-G` prompt argument will allow you to control the
strength of the restoration effect.

### CodeFormer Usage

The following command will perform face restoration with CodeFormer instead of
the default GFPGAN.
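A sketch of such an invocation, combining the `-G` strength with the `-ft`
switch (the prompt and strength value are placeholders):

```bash
invoke> "a portrait photograph of a woman" -s 50 -G 0.8 -ft codeformer
```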
A new file named `000044.2945021133.fixed.png` will be created in the output
directory. Note that the `!fix` command does not replace the original file,
unlike the behavior at generate time.

## How to disable

If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
you can disable them on the invoke.py command line with the `--no_restore` and
---

would type at the invoke> prompt:

Then pass this file's name to `invoke.py` when you invoke it:

```bash
python scripts/invoke.py --from_file "/path/to/prompts.txt"
```

You may also read a series of prompts from standard input by providing
a filename of `-`. For example, here is a python script that creates a
matrix of prompts, each one varying slightly:

```python
#!/usr/bin/env python

adjectives = ['sunny','rainy','overcast']
samplers = ['k_lms','k_euler_a','k_heun']
cfg = [7.5, 9, 11]

for adj in adjectives:
    for samp in samplers:
        for cg in cfg:
            print(f'a {adj} day -A{samp} -C{cg}')
```

Its output looks like this (abbreviated):

```bash
a sunny day -Ak_lms -C7.5
a sunny day -Ak_lms -C9
a sunny day -Ak_lms -C11
a sunny day -Ak_euler_a -C7.5
a sunny day -Ak_euler_a -C9
...
a overcast day -Ak_heun -C9
a overcast day -Ak_heun -C11
```

To feed it to invoke.py, pass the filename of "-":

```bash
python matrix.py | python scripts/invoke.py --from_file -
```

When the script is finished, each of the 27 combinations
of adjective, sampler and CFG will be executed.
The command-line interface provides `!fetch` and `!replay` commands
which allow you to read the prompts from a single previously-generated
image or a whole directory of them, write the prompts to a file, and
then replay them. Or you can create your own file of prompts and feed
them to the command-line client from within an interactive session.
See [Command-Line Interface](CLI.md) for details.
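A typical round-trip might look like this (the image filename is illustrative;
see [Command-Line Interface](CLI.md) for the exact argument forms):

```bash
invoke> !fetch outputs/img-samples/000011.455191199.png prompts.txt
invoke> !replay prompts.txt
```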
---

## **Negative and Unconditioned Prompts**
original prompt:

`#!bash "A fantastical translucent pony made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>
![step1](../assets/negative_prompt_walkthru/step1.png)
</figure>

That image has a woman, so if we want the horse without a rider, we can
add "woman" to the list of negative prompts, like this:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>
![step2](../assets/negative_prompt_walkthru/step2.png)
</figure>

That's nice - but say we also don't want the image to be quite so blue. We can
add "blue" to the list of negative prompts, so it's now [woman blue]:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>
![step3](../assets/negative_prompt_walkthru/step3.png)
</figure>

Getting close - but there's no sense in having a saddle when our horse doesn't
have a rider, so we'll add one more negative prompt: [woman blue saddle].

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>
![step4](../assets/negative_prompt_walkthru/step4.png)
</figure>

!!! notes "Notes about this feature:"
this prompt of `a man picking apricots from a tree`, let's see what happens if
we increase and decrease how much attention we want Stable Diffusion to pay to
the word `apricots`:

<figure markdown>
![mountain-man](../assets/prompt_syntax/apricots-0.png)
</figure>

Using `-` to reduce apricot-ness:

| `a man picking apricots- from a tree` | `a man picking apricots-- from a tree` | `a man picking apricots--- from a tree` |

Using `+` to increase apricot-ness:

You can also change the balance between different parts of a prompt. For
example, below is a `mountain man`:

<figure markdown>
![mountain-man](../assets/prompt_syntax/mountain-man.png)
</figure>

And here he is with more mountain:

| `mountain+ man` | `mountain++ man` | `mountain+++ man` |
use the `prompt2prompt` syntax to substitute words in the original prompt for
words in a new prompt. This works for `img2img` as well. A worked invocation
follows the list below.

- `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
  - quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.
  - for single word substitutions parentheses are also optional:
    `a cat.swap(dog) eating a hotdog`.
- Supports options `s_start`, `s_end`, `t_start`, `t_end` (each 0-1) loosely
  corresponding to bloc97's `prompt_edit_spatial_start/_end` and
  `prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
  intuitively understand.
  - Example usage: `a (cat).swap(dog, s_end=0.3) eating a hotdog` - the `s_end`
    argument means that the "spatial" (self-attention) edit will stop having any
    effect after 30% (=0.3) of the steps have been done, leaving Stable
    Diffusion with 70% of the steps where it is free to decide for itself how to
    reshape the cat-form into a dog form.
  - The numbers represent a percentage through the step sequence where the edits
    should happen. 0 means the start (noisy starting image), 1 is the end (final
    image).
  - For img2img, the step sequence does not start at 0 but instead at
    (1-strength) - so if strength is 0.7, s_start and s_end must both be
    greater than 0.3 (1-0.7) to have any effect.
- Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable
  Diffusion should have to change the shape of the subject being swapped.
  - `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.
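Putting one of these together at the `invoke>` prompt (a hedged sketch; the step
count is arbitrary):

```bash
invoke> "a (cat).swap(dog, s_end=0.3) eating a hotdog" -s 30
```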
The `prompt2prompt` code is based off
[bloc97's colab](https://github.com/bloc97/CrossAttentionControl).
usual, unless you fix the seed, the prompts will give you different results each
time you run them.

<figure markdown>
### "blue sphere, red cube, hybrid"
</figure>

This example doesn't use melding at all and represents the default way of mixing
concepts.

<figure markdown>
![blue-sphere-red-cube-hyprid](../assets/prompt-blending/blue-sphere-red-cube-hybrid.png)
</figure>

It's interesting to see how the AI expressed the concept of "cube" as the four
quadrants of the enclosing frame. If you look closely, there is depth there, so
the enclosing frame is actually a cube.

<figure markdown>
### "blue sphere:0.25 red cube:0.75 hybrid"

![blue-sphere-25-red-cube-75](../assets/prompt-blending/blue-sphere-0.25-red-cube-0.75-hybrid.png)
</figure>

the AI's "latent space" of semantic representations. Where is Ludwig
Wittgenstein when you need him?

<figure markdown>
### "blue sphere:0.75 red cube:0.25 hybrid"

![blue-sphere-75-red-cube-25](../assets/prompt-blending/blue-sphere-0.75-red-cube-0.25-hybrid.png)
</figure>

Definitely more blue-spherey. The cube is gone entirely, but it's really cool
abstract art.

<figure markdown>
### "blue sphere:0.5 red cube:0.5 hybrid"

![blue-sphere-5-red-cube-5-hybrid](../assets/prompt-blending/blue-sphere-0.5-red-cube-0.5-hybrid.png)
</figure>

Whoa...! I see blue and red, but no spheres or cubes. Is the word "hybrid"
summoning up the concept of some sort of scifi creature? Let's find out.

<figure markdown>
### "blue sphere:0.5 red cube:0.5"

![blue-sphere-red-cube-hybrid](../assets/prompt-blending/blue-sphere-0.5-red-cube-0.5.png)
</figure>
---
title: Unified Canvas
---
The Unified Canvas is a tool designed to streamline and simplify the process of
composing an image using Stable Diffusion. It offers artists all of the
available Stable Diffusion generation modes (Text To Image, Image To Image,
Inpainting, and Outpainting) as a single unified workflow. The flexibility of
the tool allows you to tweak and edit image generations, extend images beyond
their initial size, and to create new content in a freeform way both inside and
outside of existing images.

This document explains the basics of using the Unified Canvas, introducing you
to its features and tools one by one. It also describes some of the more
advanced tools available to power users of the Canvas.

## Basics

The Unified Canvas consists of two layers: the **Base Layer** and the **Mask
Layer**. You can swap from one layer to the other by selecting the layer you
want in the drop-down menu on the top left corner of the Unified Canvas, or by
pressing the (Q) hotkey.

### Base Layer

The **Base Layer** is the image content currently managed by the Canvas, and can
be exported at any time to the gallery by using the **Save to Gallery** option.
When the Base Layer is selected, the Brush (B) and Eraser (E) tools will
directly manipulate the base layer. Any images uploaded to the Canvas, or sent
to the Unified Canvas from the gallery, will clear out all existing content and
set the Base layer to the new image.

### Staging Area

When you generate images, they will display in the Canvas's **Staging Area**,
alongside the Staging Area toolbar buttons. While the Staging Area is active,
you cannot interact with the Canvas itself.

<figure markdown>
![staging area](../assets/canvas/staging_area.png)
</figure>

Accepting generations will commit the new generation to the **Base Layer**. You
can review all generated images using the Prev/Next arrows, save any individual
generations to your gallery (without committing to the Base layer) or discard
generations. While you can Undo a discard in an individual Canvas session, any
generations that are not saved will be lost when the Canvas resets.

### Mask Layer

The **Mask Layer** consists of any masked sections that have been created to
inform Inpainting generations. You can paint a new mask, or edit an existing
mask, using the Brush tool and the Eraser with the Mask layer set as your Active
layer. Any masked areas will only affect generation inside of the current
bounding box.

### Bounding Box

When generating a new image, Invoke will process and apply new images within the
area denoted by the **Bounding Box**. The Width & Height settings of the
Bounding Box, as well as its location within the Unified Canvas and pixels or
empty space that it encloses, determine how new invocations are generated - see
[Inpainting & Outpainting](#inpainting-and-outpainting) below. The Bounding Box
can be moved and resized using the Move (V) tool. It can also be resized using
the Bounding Box options in the Options Panel. By using these controls you can
generate larger or smaller images, control which sections of the image are being
processed, as well as control Bounding Box tools like the Bounding Box
fill/erase.

### <a name="inpainting-and-outpainting"></a> Inpainting & Outpainting

"Inpainting" means asking the AI to refine part of an image while leaving the
rest alone. For example, updating a portrait of your grandmother to have her
wear a biker's jacket.

| masked original | inpaint result |
| :-------------------------------------------------------------: | :----------------------------------------------------------------------------------------: |
| ![granny with a mask](../assets/canvas/mask_granny.png) | ![inpainted granny in a biker jacket](../assets/canvas/biker_jacket_granny.png) |

"Outpainting" means asking the AI to expand the original image beyond its
original borders, making a bigger image that's still based on the original. For
example, extending the above image of your Grandmother in a biker's jacket to
include her wearing jeans (and while we're at it, a motorcycle!)

<figure markdown>
![granny on a motorcycle](../assets/canvas/biker_granny.png)
</figure>

When you are using the Unified Canvas, Invoke decides automatically whether to
do Inpainting, Outpainting, ImageToImage, or TextToImage by looking inside the
area enclosed by the Bounding Box. It chooses the appropriate type of generation
based on whether the Bounding Box contains empty (transparent) areas on the Base
layer, or whether it contains colored areas from previous generations (or from
painted brushstrokes) on the Base layer, and/or whether the Mask layer contains
any brushstrokes. See [Generation Methods](#generation-methods) below for more
information.

## Getting Started

To get started with the Unified Canvas, you will want to generate a new base
layer using Txt2Img or importing an initial image. We'll refer to either of
these methods as the "initial image" in the below guide.

From there, you can consider the following techniques to augment your image:

- **New Images**: Move the bounding box to an empty area of the Canvas, type in
  your prompt, and Invoke, to generate a new image using the Text to Image
  function.
- **Image Correction**: Use the color picker and brush tool to paint corrections
  on the image, switch to the Mask layer, and brush a mask over your painted
  area to use **Inpainting**. You can also use the **ImageToImage** generation
  method to invoke new interpretations of the image.
- **Image Expansion**: Move the bounding box to include a portion of your
  initial image, and a portion of transparent/empty pixels, then Invoke using a
  prompt that describes what you'd like to see in that area. This will Outpaint
  the image. You'll typically find more coherent results if you keep about
  50-60% of the original image in the bounding box. Make sure that the Image To
  Image Strength slider is set to a high value - you may need to set it higher
  than you are used to.
- **New Content on Existing Images**: If you want to add new details or objects
  into your image, use the brush tool to paint a sketch of what you'd like to
  see on the image, switch to the Mask layer, and brush a mask over your painted
  area to use **Inpainting**. If the masked area is small, consider using a
  smaller bounding box to take advantage of Invoke's automatic Scaling features,
  which can help to produce better details.
- **And more**: There are a number of creative ways to use the Canvas, and the
  above are just starting points. We're excited to see what you come up with!

## <a name="generation-methods"></a> Generation Methods

The Canvas can use all generation methods available (Txt2Img, Img2Img,
Inpainting, and Outpainting), and these will be automatically selected and used
based on the current selection area within the Bounding Box.

### Text to Image

If the Bounding Box is placed over an area of Canvas with an **empty Base
Layer**, invoking a new image will use **TextToImage**. This generates an
entirely new image based on your prompt.

### Image to Image

If the Bounding Box is placed over an area of Canvas with an **existing Base
Layer area with no transparent pixels or masks**, invoking a new image will use
**ImageToImage**. This uses the image within the bounding box and your prompt to
interpret a new image. The image will be closer to your original image at lower
Image to Image strengths.

### Inpainting

If the Bounding Box is placed over an area of Canvas with an **existing Base
Layer and any pixels selected using the Mask layer**, invoking a new image will
use **Inpainting**. Inpainting uses the existing colors/forms in the masked area
in order to generate a new image for the masked area only. The unmasked portion
of the image will remain the same. Image to Image strength applies to the
inpainted area.

If you desire something completely different from the original image in your new
generation (i.e., if you want Invoke to ignore existing colors/forms), consider
toggling the Inpaint Replace setting on, and use high values for both Inpaint
Replace and Image To Image Strength.

By default, the **Scale Before Processing** option — which
inpaints more coherent details by generating at a larger resolution and then
scaling — is only activated when the Bounding Box is relatively small.
To get the best inpainting results you should therefore resize your Bounding
Box to the smallest area that contains your mask and enough surrounding detail
to help Stable Diffusion understand the context of what you want it to draw.
You should also update your prompt so that it describes _just_ the area within
the Bounding Box.

### Outpainting

If the Bounding Box is placed over an area of Canvas partially filled by an
existing Base Layer area and partially by transparent pixels or masks, invoking
a new image will use **Outpainting**, as well as **Inpainting** any masked
areas.

---

## Advanced Features

Features with non-obvious behavior are detailed below, in order to provide
clarity on the intent and common use cases we expect for utilizing them.

### Toolbar

#### Mask Options

- **Enable Mask** - This flag can be used to Enable or Disable the currently
  painted mask. If you have painted a mask, but you don't want it to affect the
  next invocation, but you _also_ don't want to delete it, then you can set this
  option to Disable. When you want the mask back, set this back to Enable.
- **Preserve Masked Area** - When enabled, Preserve Masked Area inverts the
  effect of the Mask on the Inpainting process. Pixels in masked areas will be
  kept unchanged, and unmasked areas will be regenerated.

#### Creative Tools

- **Brush - Base/Mask Modes** - The Brush tool switches automatically between
  different modes of operation for the Base and Mask layers respectively.
  - On the Base layer, the brush will directly paint on the Canvas using the
    color selected on the Brush Options menu.
  - On the Mask layer, the brush will create a new mask. If you're finding the
    mask difficult to see over the existing content of the Unified Canvas, you
    can change the color it is drawn with using the color selector on the Mask
    Options dropdown.
- **Erase Bounding Box** - On the Base layer, erases all pixels within the
  Bounding Box.
- **Fill Bounding Box** - On the Base layer, fills all pixels within the
  Bounding Box with the currently selected color.

#### Canvas Tools

- **Move Tool** - Allows for manipulation of the Canvas view (by dragging on the
  Canvas, outside the bounding box), the Bounding Box (by dragging the edges of
  the box), or the Width/Height of the Bounding Box (by dragging one of the 9
  directional handles).
- **Reset View** - Click to re-orient the view to the center of the Bounding
  Box.
- **Merge Visible** - If your browser is having performance problems drawing the
  image in the Unified Canvas, click this to consolidate all of the information
  currently being rendered by your browser into a merged copy of the image. This
  lowers the resource requirements and should improve performance.

### Seam Correction

When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
by Stable Diffusion into your existing image. To do this, the area around the
`seam` at the boundary between your image and the new generation is
automatically blended to produce a seamless output. In a fully automatic
process, a mask is generated to cover the seam, and then the area of the seam is
Inpainted.

Although the default options should work well most of the time, sometimes it can
help to alter the parameters that control the seam Inpainting. A wider seam and
a blur setting of about 1/3 of the seam have been noted as producing
consistently strong results (e.g. 96 wide and 16 blur - adds up to 32 blur with
both sides). Seam strength of 0.7 is best for reducing hard seams.

- **Seam Size** - The size of the seam masked area. Set higher to make a larger
  mask around the seam.
- **Seam Blur** - The size of the blur that is applied on _each_ side of the
  masked area.
- **Seam Strength** - The Image To Image Strength parameter used for the
  Inpainting generation that is applied to the seam area.
- **Seam Steps** - The number of generation steps that should be used to Inpaint
  the seam.

### Infill & Scaling

- **Scale Before Processing & W/H**: When generating images with a bounding box
  smaller than the optimized W/H of the model (e.g., 512x512 for SD1.5), this
  feature first generates at a larger size with the same aspect ratio, and then
  scales that image down to fill the selected area. This is particularly useful
  when inpainting very small details. Scaling is optional but is enabled by
  default.
- **Inpaint Replace**: When Inpainting, the default method is to utilize the
  existing RGB values of the Base layer to inform the generation process. If
  Inpaint Replace is enabled, noise is generated and blended with the existing
  pixels (completely replacing the original RGB values at an Inpaint Replace
  value of 1). This can help generate more variation from the pixels on the Base
  layers.
  - When using Inpaint Replace you should use a higher Image To Image Strength
    value, especially at higher Inpaint Replace values.
- **Infill Method**: Invoke currently supports two methods for producing RGB
  values for use in the Outpainting process: Patchmatch and Tile. We believe
  that Patchmatch is the superior method, however we provide support for Tile in
  case Patchmatch cannot be installed or is unavailable on your computer.
- **Tile Size**: The Tile method for Outpainting sources small portions of the
  original image and randomly places these into the areas being Outpainted. This
  value sets the size of those tiles.

## Hot Keys

The Unified Canvas is a tool that excels when you use hotkeys. You can view the
full list of keyboard shortcuts, updated with all new features, by clicking the
Keyboard Shortcuts icon at the top right of the InvokeAI WebUI.
@ -4,72 +4,59 @@ title: WebUI Hotkey List
|
||||
|
||||
# :material-keyboard: **WebUI Hotkey List**
|
||||
|
||||
## App Hotkeys
|
||||
## General
|
||||
|
||||
| Setting | Hotkey |
|
||||
| ----------------- | ------------------ |
|
||||
| ++"Ctrl\+Enter"++ | Invoke |
|
||||
| ++"Shift\+X"++ | Cancel |
|
||||
| ++"Alt\+A"++ | Focus Prompt |
|
||||
| ++"O"++ | Toggle Options |
|
||||
| ++"Shift\+O"++ | Pin Options |
|
||||
| ++"Z"++ | Toggle Viewer |
|
||||
| ++"G"++ | Toggle Gallery |
|
||||
| ++"F"++ | Maximize Workspace |
|
||||
| ++"1-5"++ | Change Tabs |
|
||||
| ++"`"++ | Toggle Console |
|
||||
| Setting | Hotkey |
|
||||
| ----------------- | ---------------------- |
|
||||
| ++a++ | Set All Parameters |
|
||||
| ++s++ | Set Seed |
|
||||
| ++u++ | Upscale |
|
||||
| ++r++ | Restoration |
|
||||
| ++i++ | Show Metadata |
|
||||
| ++d++ ++d++ ++l++ | Delete Image |
|
||||
| ++alt+a++ | Focus prompt input |
|
||||
| ++shift+i++ | Send To Image to Image |
|
||||
| ++ctrl+enter++ | Start processing |
|
||||
| ++shift+x++ | cancel Processing |
|
||||
| ++shift+d++ | Toggle Dark Mode |
|
||||
| ++"`"++ | Toggle console |
|
||||
|
||||
## General Hotkeys
## Tabs

| Setting        | Hotkey                 |
| -------------- | ---------------------- |
| ++"P"++        | Set Prompt             |
| ++"S"++        | Set Seed               |
| ++"A"++        | Set Parameters         |
| ++"Shift\+R"++ | Restore Faces          |
| ++"Shift\+U"++ | Upscale                |
| ++"I"++        | Show Info              |
| ++"Shift\+I"++ | Send To Image To Image |
| ++"Del"++      | Delete Image           |
| ++"Esc"++      | Close Panels           |

| Setting | Hotkey                    |
| ------- | ------------------------- |
| ++1++   | Go to Text To Image Tab   |
| ++2++   | Go to Image to Image Tab  |
| ++3++   | Go to Inpainting Tab      |
| ++4++   | Go to Outpainting Tab     |
| ++5++   | Go to Nodes Tab           |
| ++6++   | Go to Post Processing Tab |

## Gallery Hotkeys
## Gallery

| Setting           | Hotkey                      |
| ----------------- | --------------------------- |
| ++"Arrow Left"++  | Previous Image              |
| ++"Arrow Right"++ | Next Image                  |
| ++"Shift\+G"++    | Toggle Gallery Pin          |
| ++"Shift\+Up"++   | Increase Gallery Image Size |
| ++"Shift\+Down"++ | Decrease Gallery Image Size |

| Setting        | Hotkey                          |
| -------------- | ------------------------------- |
| ++g++          | Toggle Gallery                  |
| ++left++       | Go to previous image in gallery |
| ++right++      | Go to next image in gallery     |
| ++shift+p++    | Pin gallery                     |
| ++shift+up++   | Increase gallery image size     |
| ++shift+down++ | Decrease gallery image size     |
| ++shift+r++    | Reset image gallery size        |

## Unified Canvas Hotkeys
## Inpainting

| Setting                        | Hotkey                 |
| ------------------------------ | ---------------------- |
| ++"B"++                        | Select Brush           |
| ++"E"++                        | Select Eraser          |
| ++"["++                        | Decrease Brush Size    |
| ++"]"++                        | Increase Brush Size    |
| ++"Shift\+["++                 | Decrease Brush Opacity |
| ++"Shift\+]"++                 | Increase Brush Opacity |
| ++"V"++                        | Move Tool              |
| ++"Shift\+F"++                 | Fill Bounding Box      |
| ++"Delete/Backspace"++         | Erase Bounding Box     |
| ++"C"++                        | Select Color Picker    |
| ++"N"++                        | Toggle Snap            |
| ++"Hold Space"++               | Quick Toggle Move      |
| ++"Q"++                        | Toggle Layer           |
| ++"Shift\+C"++                 | Clear Mask             |
| ++"H"++                        | Hide Mask              |
| ++"Shift\+H"++                 | Show/Hide Bounding Box |
| ++"Shift\+M"++                 | Merge Visible          |
| ++"Shift\+S"++                 | Save To Gallery        |
| ++"Ctrl\+C"++                  | Copy To Clipboard      |
| ++"Shift\+D"++                 | Download Image         |
| ++"Ctrl\+Z"++                  | Undo                   |
| ++"Ctrl\+Y / Ctrl\+Shift\+Z"++ | Redo                   |
| ++"R"++                        | Reset View             |
| ++"Arrow Left"++               | Previous Staging Image |
| ++"Arrow Right"++              | Next Staging Image     |
| ++"Enter"++                    | Accept Staging Image   |

| Setting                      | Hotkey                |
| ---------------------------- | --------------------- |
| ++"["++                      | Decrease brush size   |
| ++"]"++                      | Increase brush size   |
| ++alt+"["++                  | Decrease mask opacity |
| ++alt+"]"++                  | Increase mask opacity |
| ++b++                        | Select brush          |
| ++e++                        | Select eraser         |
| ++ctrl+z++                   | Undo brush stroke     |
| ++ctrl+shift+z++, ++ctrl+y++ | Redo brush stroke     |
| ++h++                        | Hide mask             |
| ++shift+m++                  | Invert mask           |
| ++shift+c++                  | Clear mask            |
| ++shift+j++                  | Expand canvas         |

@@ -1,5 +0,0 @@

---
title: Overview
---

Here you can find the documentation for different features.

@@ -39,7 +39,7 @@ Looking for a short version? Here's a TL;DR in 3 tables.

!!! tip "suggestions"

    For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices (the latter 2 run 0.5x as quick, but tend to converge 2x as quick as `K_LMS`). At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead.

    For variability, use `K_EULER_A` (runs 2x as quick as `K_DPM_2_A`).

---

docs/index.md
@@ -6,14 +6,15 @@ title: Home

The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as:

```bash
pip install -r docs/requirements-mkdocs.txt
pip install -r requirements-mkdocs.txt
mkdocs serve
```

-->

<div align="center" markdown>

[](https://github.com/invoke-ai/InvokeAI)
# ^^**InvokeAI: A Stable Diffusion Toolkit**^^ :tools: <br> <small>Formerly known as lstein/stable-diffusion</small>

[](https://github.com/invoke-ai/InvokeAI)

[![discord badge]][discord link]

@@ -69,11 +70,7 @@ image-to-image generator. It provides a streamlined process with various new

features and options to aid the image generation process. It runs on Windows,
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.

**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>

@@ -83,19 +80,11 @@ Q&A</a>]

## :octicons-package-dependencies-24: Installation

This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

First time users, please see
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative installation and
upgrade instructions, please see:
[InvokeAI Installation Overview](installation/)

Linux users who wish to make use of the PyPatchMatch inpainting functions will
need to perform a bit of extra work to enable this module. Instructions can be
found at [Installing PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).

This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

## :fontawesome-solid-computer: Hardware Requirements

@@ -104,29 +93,25 @@ found at [Installing PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).

You will need one of the following:

- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

We do **not recommend** the following video cards due to issues with their
running in half-precision mode and having insufficient VRAM to render 512x512
images in full-precision mode:

- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory

- At least 12 GB Main Memory RAM.

### :fontawesome-regular-hard-drive: Disk

- At least 18 GB of free disk space for the machine learning model, Python, and
- At least 12 GB of free disk space for the machine learning model, Python, and
  all its dependencies.

!!! info

    If you have an Nvidia 10xx series card (e.g. the 1080ti), please run the invoke script in
    full-precision mode as shown below.

    Similarly, specify full-precision mode on Apple M1 hardware.

    Precision is auto configured based on the device. If however you encounter errors like
    `expected type Float but found Half` or `not implemented for Half` you can try starting
    `invoke.py` with the `--precision=float32` flag:

@@ -134,115 +119,121 @@ images in full-precision mode:

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
```
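For example, using the newer flag named above (equivalent to the deprecated `--full_precision` switch):

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```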
## :octicons-gift-24: InvokeAI Features

- [The InvokeAI Web Interface](features/WEB.md) -
  [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
  [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
- [The Command Line Interface](features/CLI.md) -
  [Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
  [Outpainting](features/OUTPAINTING.md) -
  [Adding custom styles and subjects](features/CONCEPTS.md) -
  [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [The InvokeAI Web Interface](features/WEB.md)
- [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md)
<!-- this link does not exist - [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md) -->
- [The Command Line Interface](features/CLI.md)
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
<!-- separator -->
- [Generating Variations](features/VARIATIONS.md)
<!-- separator -->
- [Prompt Engineering](features/PROMPTS.md)
<!-- separator -->
- Miscellaneous
    - [NSFW Checker](features/NSFW.md)
    - [Embiggen upscaling](features/EMBIGGEN.md)
    - [Other](features/OTHER.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other](features/OTHER.md)

## :octicons-log-16: Latest Changes

### v2.2.4 <small>(11 December 2022)</small>
### v2.1.3 <small>(13 November 2022)</small>

#### the `invokeai` directory

- A choice of installer scripts that automate installation and configuration. See [Installation](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/installation/INSTALL.md).
- A streamlined manual installation process that works for both Conda and PIP-only installs. See [Manual Installation](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/installation/INSTALL_MANUAL.md).
- The ability to save frequently-used startup options (model to load, steps, sampler, etc.) in a `.invokeai` file. See [Client](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/features/CLI.md)
- Support for AMD GPU cards (non-CUDA) on Linux machines.
- Multiple bugs and edge cases squashed.

Previously there were two directories to worry about, the directory that
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
directory that contained the models files, embeddings, configuration and
outputs. With the 2.2.4 release, this dual system is done away with, and
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
lives in a directory named `invokeai`. By default this directory is located in
your home directory (e.g. `\Users\yourname` on Windows), but you can select
where it goes at install time.

### v2.1.0 <small>(2 November 2022)</small>

After installation, you can delete the install directory (the one that the zip
file creates when it unpacks). Do **not** delete or move the `invokeai`
directory!

- [Inpainting](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
  support in the WebGUI
- Greatly improved navigation and user experience in the
  [WebGUI](https://invoke-ai.github.io/InvokeAI/features/WEB/)
- The prompt syntax has been enhanced with
  [prompt weighting, cross-attention and prompt merging](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/).
- You can now load
  [multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
  without leaving the CLI.
- The installation process (via `scripts/preload_models.py`) now lets you select
  among several popular
  [Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
  and downloads and installs them on your behalf. Among other models, this
  script will install the current Stable Diffusion 1.5 model as well as a
  StabilityAI variable autoencoder (VAE) which improves face generation.
- Tired of struggling with photoeditors to get the masked region for
  inpainting just right? Let the AI make the mask for you using
  [text masking](https://docs.google.com/presentation/d/1pWoY510hCVjz0M6X9CBbTznZgW2W5BYNKrmZm7B45q8/edit#slide=id.p).
  This feature allows you to specify the part of the image to paint over using
  just English-language phrases.
- Tired of seeing the head of your subjects cropped off? Uncrop them in the CLI
  with the
  [outcrop feature](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/#outcrop).
- Tired of seeing your subjects' bodies duplicated or mangled when generating
  larger-dimension images? Check out the `--hires` option in the CLI, or select
  the corresponding toggle in the WebGUI.
- We now support textual inversion and fine-tune .bin styles and subjects from
  the Hugging Face archive of
  [SD Concepts](https://huggingface.co/sd-concepts-library). Load the .bin file
  using the `--embedding_path` option. (The next version will support merging
  and loading of multiple simultaneous models).
- ...

##### Initialization file `invokeai/invokeai.init`
### v2.0.1 <small>(13 October 2022)</small>

You can place frequently-used startup options in this file, such as the default
number of steps or your preferred sampler. To keep everything in one place, this
file has now been moved into the `invokeai` directory and is named
`invokeai.init`.

- fix noisy images at high step count when using k\* samplers
- dream.py script now calls invoke.py module directly rather than via a new
  python process (which could break the environment)

#### To update from Version 2.2.3
### v2.0.0 <small>(9 October 2022)</small>

The easiest route is to download and unpack one of the 2.2.4 installer files.
When it asks you for the location of the `invokeai` runtime directory, respond
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
and answer "Y" when asked if you want to reuse the directory.

The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
does not know about the new directory layout and won't be fully functional.

#### To update to 2.2.5 (and beyond) there's now an update path

As they become available, you can update to more recent versions of InvokeAI
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
Running it without any arguments will install the most recent version of
InvokeAI. Alternatively, you can get set releases by running the `update.sh`
script with an argument in the command shell. This syntax accepts the path to
the desired release's zip file, which you can find by clicking on the green
"Code" button on this repository's home page.
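For example, this would fetch the 2.2.0 release (the same URL pattern shown in the manual-update section below; substitute the tag of the release you want):

```bash
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
```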
#### Other 2.2.4 Improvements

- Fix InvokeAI GUI initialization by @addianto in #1687
- fix link in documentation by @lstein in #1728
- Fix broken link by @ShawnZhong in #1736
- Remove reference to binary installer by @lstein in #1731
- documentation fixes for 2.2.3 by @lstein in #1740
- Modify installer links to point closer to the source installer by @ebr in #1745
- add documentation warning about 1650/60 cards by @lstein in #1753
- Fix Linux source URL in installation docs by @andybearman in #1756
- Make install instructions discoverable in readme by @damian0815 in #1752
- typo fix by @ofirkris in #1755
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte in #1765
- stability and usage improvements to binary & source installers by @lstein in #1760
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
- invoke script cds to its location before running by @lstein in #1805
- Make PaperCut and VoxelArt models load again by @lstein in #1730
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in #1817
- Clean up readme by @hipsterusername in #1820
- Optimized Docker build with support for external working directory by @ebr in #1544
- disable pushing the cloud container by @mauwii in #1831
- Fix docker push github action and expand with additional metadata by @ebr in #1837
- Fix Broken Link To Notebook by @VedantMadane in #1821
- Account for flat models by @spezialspezial in #1766
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by @SammCheese in #1848
- Make force free GPU memory work in img2img by @addianto in #1844
- New installer by @lstein

- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
  backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for
  <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a>
  and
  <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
- img2img runs on all k\* samplers
- Support for
  <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative
  prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for
  <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing
  of previously-generated images</a> using facial reconstruction, ESRGAN
  upscaling, outcropping (similar to DALL-E infinite canvas), and "embiggen"
  upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows
  <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger
  images to be created without duplicating elements</a>, at the cost of some
  performance.
- New `--perlin` and `--threshold` options allow you to add and control
  variation during image generation (see
  <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding
  and Perlin Noise Initialization</a>).
- Extensive metadata now written into PNG files, allowing reliable regeneration
  of images and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac
  platforms.
- Improved
  <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line
  completion behavior</a>. New commands added:
    - List command-line history with `!history`
    - Search command-line history with `!search`
    - Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
  configure. To switch away from auto, use the new flag like
  `--precision=float32`.

For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
**[CHANGELOG](CHANGELOG/#v114-11-september-2022)**.

## :material-target: Troubleshooting

@@ -1,315 +0,0 @@

---
title: Installing with the Automated Installer
---

# InvokeAI Automated Installation

## Introduction

The automated installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.

## Walk through

1. Make sure that your system meets the
   [hardware requirements](../index.md#hardware-requirements) and has the
   appropriate GPU drivers installed. In particular, if you are a Linux user
   with an AMD GPU installed, you may need to install the
   [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

    !!! info "Required Space"

        Installation requires roughly 18G of free disk space to load the libraries and
        recommended model weights files.

        Regardless of your destination disk, your *system drive* (`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB of free disk space to download and cache python dependencies. NOTE for Linux users: if your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.
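        For example, on Linux or macOS you can verify this quickly (a generic check, not InvokeAI-specific; Windows users can check drive properties in Explorer instead):

        ```bash
        df -h / /tmp   # confirm the system drive and temporary directory have enough free space
        ```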
2. Check that your system has an up-to-date Python installed. To do this, open
   up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
   "Powershell" on Windows) and type `python --version`. If Python is
   installed, it will print out the version number. If it is version `3.9.1` or
   higher, you meet requirements.
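    For example (sample output; your patch version may differ):

    ```bash
    python --version
    # Python 3.10.9
    ```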
    !!! warning "If you see an older version, or get a command not found error"

        Go to [Python Downloads](https://www.python.org/downloads/) and
        download the appropriate installer package for your platform. We recommend
        [Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
        which has been extensively tested with InvokeAI.

    !!! warning "At this time we do not recommend Python 3.11"

    _Please select your platform in the section below for platform-specific
    setup requirements._

    === "Windows users"

        - During the Python configuration process,
          look out for a checkbox to add Python to your PATH
          and select it. If the install script complains that it can't
          find python, then open the Python installer again and choose
          "Modify" existing installation.

        - Installation requires an up-to-date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170

    === "Mac users"

        - After installing Python, you may need to run the
          following command from the Terminal in order to install the Web
          certificates needed to download model data from https sites. If
          you see lots of CERTIFICATE ERRORS during the last part of the
          install, this is the problem, and you can fix it with this command:

          `/Applications/Python\ 3.10/Install\ Certificates.command`

        - You may need to install the Xcode command line tools. These
          are a set of tools that are needed to run certain applications in a
          Terminal, including InvokeAI. This package is provided directly by Apple.

        - To install, open a terminal window and run `xcode-select --install`.
          You will get a macOS system popup guiding you through the
          install. If you already have them installed, you will instead see some
          output in the Terminal advising you that the tools are already installed.

        - More information can be found here:
          https://www.freecodecamp.org/news/install-xcode-command-line-tools/

    === "Linux users"

        For reasons that are not entirely clear, installing the correct version of Python can be a bit of a challenge on Ubuntu, Linux Mint, Pop!_OS, and other Debian-derived distributions.

        On Ubuntu 22.04 and higher, run the following:

        ```bash
        sudo apt update
        sudo apt install -y python3 python3-pip python3-venv
        sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
        ```

        On Ubuntu 20.04, the process is slightly different:

        ```bash
        sudo apt update
        sudo apt install -y software-properties-common
        sudo add-apt-repository -y ppa:deadsnakes/ppa
        sudo apt install -y python3.10 python3-pip python3.10-venv
        sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
        ```

        Both `python` and `python3` commands are now pointing at Python 3.10. You can still access older versions of Python by calling `python2`, `python3.8`, etc.
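        To confirm that the alternatives link took effect, you can resolve it (a quick check based on the paths used above):

        ```bash
        readlink -f /usr/local/bin/python   # should print /usr/bin/python3.10
        ```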
        Linux systems require a couple of additional graphics libraries to be installed for proper functioning of `python3-opencv`. Please run the following:

        `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`

3. The source installer is distributed in ZIP files. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

    - [InvokeAI-installer-2.2.4-p5-mac.zip](https://github.com/invoke-ai/InvokeAI/files/10254728/InvokeAI-installer-2.2.4-p5-mac.zip)
    - [InvokeAI-installer-2.2.4-p5-windows.zip](https://github.com/invoke-ai/InvokeAI/files/10254729/InvokeAI-installer-2.2.4-p5-windows.zip)
    - [InvokeAI-installer-2.2.4-p5-linux.zip](https://github.com/invoke-ai/InvokeAI/files/10254727/InvokeAI-installer-2.2.4-p5-linux.zip)

    Download the one that is appropriate for your operating system.

4. Unpack the zip file into a convenient directory. This will create a new
   directory named "InvokeAI-Installer". This example shows how this would look
   using the `unzip` command-line tool, but you may use any graphical or
   command-line Zip extractor:

    ```cmd
    C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
    Archive: C:\Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
      creating: InvokeAI-Installer\
     inflating: InvokeAI-Installer\install.bat
     inflating: InvokeAI-Installer\readme.txt
    ...
    ```
    After successful installation, you can delete the `InvokeAI-Installer`
    directory.

5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
   accept the dialog box that asks you if you wish to modify your registry.
   This activates long filename support on your system and will prevent
   mysterious errors during installation.

6. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

    On Windows systems you will probably get an "Untrusted Publisher" warning.
    Click on "More Info" and select "Run Anyway." You trust us, right?

7. Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd InvokeAI-Installer
    C:\Documents\Linco\InvokeAI-Installer> install.bat
    ```

8. The script will ask you to choose where to install InvokeAI. Select a
   directory with at least 18G of free space for a full install. InvokeAI and
   all its support files will be installed into a new directory named
   `invokeai` located at the location you specify.

    - The default is to install the `invokeai` directory in your home directory,
      usually `C:\Users\YourName\invokeai` on Windows systems,
      `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
      on Macintoshes, where "YourName" is your login name.

    - The script uses tab autocompletion to suggest directory path completions.
      Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
      to suggest completions.
9. Sit back and let the install script work. It will install the third-party
   libraries needed by InvokeAI, then download the current InvokeAI release and
   install it.

    Be aware that some of the library download and install steps take a long
    time. In particular, the `pytorch` package is quite large and often appears
    to get "stuck" at 99.9%. Have patience and the installation step will
    eventually resume. However, there are occasions when the library install
    does legitimately get stuck. If you have been waiting for more than ten
    minutes and nothing is happening, you can interrupt the script with ^C. You
    may restart it and it will pick up where it left off.

10. After installation completes, the installer will launch a script called
    `configure_invokeai.py`, which will guide you through the first-time process
    of selecting one or more Stable Diffusion model weights files, downloading
    and configuring them. We provide a list of popular models that InvokeAI
    performs well with. However, you can add more weight files later on using
    the command-line client or the Web UI. See
    [Installing Models](050_INSTALLING_MODELS.md) for details.

    Note that the main Stable Diffusion weights file is protected by a license
    agreement that you must agree to in order to use. The script will list the
    steps you need to take to create an account on the official site that hosts
    the weights files, accept the agreement, and provide an access token that
    allows InvokeAI to legally download and install the weights files.

    If you have already downloaded the weights file(s) for another Stable
    Diffusion distribution, you may skip this step (by selecting "skip" when
    prompted) and configure InvokeAI to use the previously-downloaded files. The
    process for this is described in [Installing Models](050_INSTALLING_MODELS.md).

11. The script will now exit and you'll be ready to generate some images. Look
    for the directory `invokeai` installed in the location you chose at the
    beginning of the install session. Look for a shell script named `invoke.sh`
    (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
    it or typing its name at the command-line:

    ```cmd
    C:\Documents\Linco> cd invokeai
    C:\Documents\Linco\invokeai> invoke.bat
    ```

    - The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
      (1) the command-line interface, or (2) the web GUI. If you start the
      latter, you can load the user interface by pointing your browser at
      http://localhost:9090.

    - The script also offers you a third option labeled "open the developer
      console". If you choose this option, you will be dropped into a
      command-line interface in which you can run python commands directly,
      access developer tools, and launch InvokeAI with customized options.

12. You can launch InvokeAI with several different command-line arguments that
    customize its behavior. For example, you can change the location of the
    image output directory, or select your favorite sampler. See the
    [Command-Line Interface](../features/CLI.md) for a full list of the options.

    - To set defaults that will take effect every time you launch InvokeAI,
      use a text editor (e.g. Notepad) to edit the file
      `invokeai\invokeai.init`. It contains a variety of examples that you can
      follow to add and modify launch options (a hypothetical excerpt is
      sketched below).
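    As an illustration, a hypothetical `invokeai.init` excerpt might look like this (the flag names mirror `invoke.py`'s command-line options; adjust values and paths to taste):

    ```bash
    # hypothetical invokeai.init excerpt -- one launch option per line
    --web
    --steps=30
    --outdir=C:\Users\YourName\invokeai\outputs
    ```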
    !!! warning "The `invokeai` directory contains the `invoke` application, its configuration files, the model weight files, and outputs of image generation. Once InvokeAI is installed, do not move or remove this directory."

## Troubleshooting

### _Package dependency conflicts_

If you have previously installed InvokeAI or another Stable Diffusion package,
the installer may occasionally pick up outdated libraries and either the
installer or `invoke` will fail with complaints about library conflicts. You can
address this by entering the `invokeai` directory and running `update.sh`, which
will bring InvokeAI up to date with the latest libraries.

### ldm from pypi

!!! warning

    Some users have tried to correct dependency problems by installing
    the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
    has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
    ldm will make matters worse. If you've installed ldm, uninstall it with
    `pip uninstall ldm`.

### Corrupted configuration file

Everything seems to install ok, but `invoke` complains of a corrupted
configuration file and goes back into the configuration process (asking you to
download models, etc), but this doesn't fix the problem.

This issue is often caused by a misconfigured configuration directive in the
`invokeai\invokeai.init` initialization file that contains startup settings. The
easiest way to fix the problem is to move the file out of the way and re-run
`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
script) and run this command:

```cmd
configure_invokeai.py --root=.
```

Note the dot (.) after `--root`. It is part of the command.

_If none of these maneuvers fixes the problem_ then please report the problem to
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.

### other problems

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.

## Updating to newer versions

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `configure_invokeai` script to download any updated
models files that may be needed. You can also use this to add additional models
that you did not select at installation time.
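For example (a sketch; the script lives in the `invokeai` runtime directory created at install time):

```bash
cd invokeai
./update.sh    # Linux/Mac; run update.bat on Windows instead
```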
You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `configure_invokeai.py`. This happens relatively infrequently. To do
this, simply open up the developer's console again and type
`python scripts/configure_invokeai.py`.

You may also use the `update` script to install any selected version of
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
link of the version you wish to install. You can find the zip links by going to
one of the release pages and looking for the **Assets** section at the
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
big code directory on the InvokeAI welcome page. When you find the version you
want to install, go to the green "<> Code" button at the top, and copy the
"Download ZIP" link.

Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
version as its argument. For example, this will install the old 2.2.0 release:

```cmd
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
```
@@ -1,589 +0,0 @@

---
title: Installing Manually
---

<figure markdown>

# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows

</figure>

!!! warning "This is for advanced Users"

    who are already experienced with using conda or pip

## Introduction

You have two choices for manual installation. The [first
one](#PIP_method) uses basic Python virtual environment (`venv`)
commands and the PIP package manager. The [second one](#Conda_method)
is based on the Anaconda3 package manager (`conda`). Both methods require
you to enter commands on the terminal, also known as the "console".

Note that the conda install method is currently deprecated and will no
longer be supported at some point in the future.

On Windows systems you are encouraged to install and use
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice
features such as command-line completion.

## pip Install

To install InvokeAI with virtual environments and the PIP package
manager, please follow these steps:

1. Make sure you are using Python 3.9 or 3.10. The rest of the install
   procedure depends on this:

    ```bash
    python -V
    ```

2. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
   GitHub:

    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```

    This will create an InvokeAI folder where you will follow the rest of the
    steps.

3. From within the InvokeAI top-level directory, create and activate a virtual
   environment named `invokeai`:

    ```bash
    python -mvenv invokeai
    source invokeai/bin/activate
    ```

4. Make sure that pip is installed in your virtual environment and up to date:

    ```bash
    python -mensurepip --upgrade
    python -mpip install --upgrade pip
    ```

5. Pick the correct `requirements*.txt` file for your hardware and operating
   system.

    We have created a series of environment files suited for different operating
    systems and GPU hardware. They are located in the
    `environments-and-requirements` directory:

    <figure markdown>

    | filename                            | OS                                                               |
    | :---------------------------------: | :--------------------------------------------------------------: |
    | requirements-lin-amd.txt            | Linux with an AMD (ROCm) GPU                                     |
    | requirements-lin-arm64.txt          | Linux running on arm64 systems                                   |
    | requirements-lin-cuda.txt           | Linux with an NVIDIA (CUDA) GPU                                  |
    | requirements-mac-mps-cpu.txt        | Macintoshes with MPS acceleration                                |
    | requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |

    </figure>

    Select the appropriate requirements file, and make a link to it from
    `requirements.txt` in the top-level InvokeAI directory. The command to do
    this from the top-level directory is:

    !!! example ""

        === "Macintosh and Linux"

            !!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."

            ```bash
            ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
            ```

        === "Windows"

            !!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"

            ```cmd
            copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
            ```

    !!! warning

        Please do not link or copy `environments-and-requirements/requirements-base.txt`.
        This is a base requirements file that does not have the platform-specific
        libraries. Also, be sure to link or copy the platform-specific file to
        a top-level file named `requirements.txt` as shown here. Running pip on
        a requirements file in a subdirectory will not work as expected.

    When this is done, confirm that a file named `requirements.txt` has been
    created in the InvokeAI root directory and that it points to the correct
    file in `environments-and-requirements`.
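    For example, on a Linux CUDA system the check might look like this (sample output; the link target depends on the file you chose):

    ```bash
    ls -l requirements.txt
    # requirements.txt -> environments-and-requirements/requirements-lin-cuda.txt
    ```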
6. Run PIP

    Be sure that the `invokeai` environment is active before doing this:

    ```bash
    pip install --prefer-binary -r requirements.txt
    ```

7. Set up the runtime directory

    In this step you will initialize a runtime directory that will
    contain the models, model config files, directory for textual
    inversion embeddings, and your outputs. This keeps the runtime
    directory separate from the source code and aids in updating.

    You may pick any location for this directory using the `--root_dir`
    option (abbreviated `--root`). If you don't pass this option, it will
    default to `invokeai` in your home directory.

    ```bash
    configure_invokeai.py --root_dir ~/Programs/invokeai
    ```

    The script `configure_invokeai.py` will interactively guide you through the
    process of downloading and installing the weights files needed for InvokeAI.
    Note that the main Stable Diffusion weights file is protected by a license
    agreement that you have to agree to. The script will list the steps you need
    to take to create an account on the site that hosts the weights files,
    accept the agreement, and provide an access token that allows InvokeAI to
    legally download and install the weights files.

    If you get an error message about a module not being installed, check that
    the `invokeai` environment is active and if not, repeat step 3.

    Note that `configure_invokeai.py` and `invoke.py` should be installed
    under your virtual environment directory and the system should find them
    on the PATH. If this isn't working on your system, you can call the
    scripts directly using `python scripts/configure_invokeai.py` and
    `python scripts/invoke.py`.

    !!! tip

        If you have already downloaded the weights file(s) for another Stable
        Diffusion distribution, you may skip this step (by selecting "skip" when
        prompted) and configure InvokeAI to use the previously-downloaded files. The
        process for this is described [here](050_INSTALLING_MODELS.md).

8. Run the command-line or the web interface:

    Activate the environment (with `source invokeai/bin/activate`), and then
    run the script `invoke.py`. If you selected a non-default location
    for the runtime directory, please specify the path with the `--root_dir`
    option (abbreviated below as `--root`):

    !!! example ""

        !!! warning "Make sure that the virtual environment is activated, which should place `(invokeai)` in front of your prompt!"

        === "CLI"

            ```bash
            invoke.py --root ~/Programs/invokeai
            ```

        === "local Webserver"

            ```bash
            invoke.py --web --root ~/Programs/invokeai
            ```

        === "Public Webserver"

            ```bash
            invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
            ```

    If you choose to run the web interface, point your browser at
    http://localhost:9090 in order to load the GUI.

    !!! tip

        You can permanently set the location of the runtime directory by setting the environment variable `INVOKEAI_ROOT` to the path of the directory.
9. Render away!

    Browse the [features](../features/CLI.md) section to learn about all the things you
    can do with InvokeAI.

    Note that some GPUs are slow to warm up. In particular, when using an AMD
    card with the ROCm driver, you may have to wait for over a minute the first
    time you try to generate an image. Fortunately, after the warm-up period
    rendering will be fast.

10. Subsequently, to relaunch the script, be sure to activate the `invokeai`
    virtual environment (with `source invokeai/bin/activate`), enter the
    `InvokeAI` directory, and then launch the invoke script. If you forget to
    activate the 'invokeai' environment, the script will fail with multiple
    `ModuleNotFound` errors.

!!! tip

    Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.

---

### Conda method

1. Check that your system meets the
   [hardware requirements](index.md#Hardware_Requirements) and has the
   appropriate GPU drivers installed. In particular, if you are a Linux user
   with an AMD GPU installed, you may need to install the
   [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

    InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
    of ROCm driver support on this platform.

    To confirm that the appropriate drivers are installed, run `nvidia-smi` on
    NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
    information about the installed video card.
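    For example (sample checks; the exact output format varies with driver version):

    ```bash
    nvidia-smi   # NVIDIA: should list your GPU model and driver/CUDA versions
    rocm-smi     # AMD: should list the installed AMD GPU
    ```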
    Macintosh users with MPS acceleration, or anybody with a CPU-only system,
    can skip this step.

2. You will need to install Anaconda3 and Git if they are not already
   available. Use your operating system's preferred package manager, or
   download the installers manually. You can find them here:

    - [Anaconda3](https://www.anaconda.com/)
    - [git](https://git-scm.com/downloads)

3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
   GitHub:

    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```

    This will create an InvokeAI folder where you will follow the rest of the
    steps.

4. Enter the newly-created InvokeAI folder:

    ```bash
    cd InvokeAI
    ```

    From this step forward make sure that you are working in the InvokeAI
    directory!

5. Select the appropriate environment file:

    We have created a series of environment files suited for different operating
    systems and GPU hardware. They are located in the
    `environments-and-requirements` directory:

    <figure markdown>

    | filename                 | OS                              |
    | :----------------------: | :-----------------------------: |
    | environment-lin-amd.yml  | Linux with an AMD (ROCm) GPU    |
    | environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU   |
    | environment-mac.yml      | Macintosh                       |
    | environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |

    </figure>

    Choose the appropriate environment file for your system and link or copy it
    to `environment.yml` in InvokeAI's top-level directory. To do so, run the
    following command from the repository root:

    !!! Example ""

        === "Macintosh and Linux"

            !!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"

            ```bash
            ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
            ```

            When this is done, confirm that a file `environment.yml` has been linked in
            the InvokeAI root directory and that it points to the correct file in
            `environments-and-requirements`.

            ```bash
            ls -la
            ```
=== "Windows"
|
||||
|
||||
!!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
|
||||
|
||||
```cmd
|
||||
copy environments-and-requirements\environment-win-cuda.yml environment.yml
|
||||
```
|
||||
|
||||
Afterwards verify that the file `environment.yml` has been created, either via the
|
||||
explorer or by using the command `dir` from the terminal
|
||||
|
||||
```cmd
|
||||
dir
|
||||
```
|
||||
|
||||
!!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
|
||||
|
||||
6. Create the conda environment:
|
||||
|
||||
```bash
|
||||
conda env update
|
||||
```
|
||||
|
||||
This will create a new environment named `invokeai` and install all InvokeAI
|
||||
dependencies into it. If something goes wrong you should take a look at
|
||||
[troubleshooting](#troubleshooting).
|
||||
|
||||
7. Activate the `invokeai` environment:
|
||||
|
||||
In order to use the newly created environment you will first need to
|
||||
activate it
|
||||
|
||||
```bash
|
||||
conda activate invokeai
|
||||
```
|
||||
|
||||
Your command-line prompt should change to indicate that `invokeai` is active
|
||||
by prepending `(invokeai)`.
|
||||
|
||||
8. Set up the runtime directory
|
||||
|
||||
In this step you will initialize a runtime directory that will
|
||||
contain the models, model config files, directory for textual
|
||||
inversion embeddings, and your outputs. This keeps the runtime
|
||||
directory separate from the source code and aids in updating.
|
||||
|
||||
You may pick any location for this directory using the `--root_dir`
|
||||
option (abbreviated --root). If you don't pass this option, it will
|
||||
default to `invokeai` in your home directory.
|
||||
|
||||
```bash
|
||||
python scripts/configure_invokeai.py --root_dir ~/Programs/invokeai
|
||||
```
|
||||
|
||||
The script `configure_invokeai.py` will interactively guide you through the
|
||||
process of downloading and installing the weights files needed for InvokeAI.
|
||||
Note that the main Stable Diffusion weights file is protected by a license
|
||||
agreement that you have to agree to. The script will list the steps you need
|
||||
to take to create an account on the site that hosts the weights files,
|
||||
accept the agreement, and provide an access token that allows InvokeAI to
|
||||
legally download and install the weights files.
|
||||
|
||||
If you get an error message about a module not being installed, check that
|
||||
the `invokeai` environment is active and if not, repeat step 5.
|
||||
|
||||
Note that `configure_invokeai.py` and `invoke.py` should be
|
||||
installed under your conda directory and the system should find
|
||||
them automatically on the PATH. If this isn't working on your
|
||||
system, you can call the scripts directory using `python
|
||||
scripts/configure_invoke.py` and `python scripts/invoke.py`.
|
||||
|
||||
    !!! tip

        If you have already downloaded the weights file(s) for another Stable
        Diffusion distribution, you may skip this step (by selecting "skip" when
        prompted) and configure InvokeAI to use the previously-downloaded files. The
        process for this is described [here](050_INSTALLING_MODELS.md).

9. Run the command-line or the web interface:

    Activate the environment (with `conda activate invokeai`), and then
    run the script `invoke.py`. If you selected a non-default location
    for the runtime directory, please specify the path with the `--root_dir`
    option (abbreviated below as `--root`):

    !!! example ""

        !!! warning "Make sure that the conda environment is activated, which should place `(invokeai)` in front of your prompt!"

        === "CLI"

            ```bash
            invoke.py --root ~/Programs/invokeai
            ```

        === "local Webserver"

            ```bash
            invoke.py --web --root ~/Programs/invokeai
            ```

        === "Public Webserver"

            ```bash
            invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
            ```

    If you choose to run the web interface, point your browser at
    http://localhost:9090 in order to load the GUI.

    !!! tip

        You can permanently set the location of the runtime directory by setting the environment variable `INVOKEAI_ROOT` to the path of your choice.
10. Render away!

    Browse the [features](../features/CLI.md) section to learn about all the things you
    can do with InvokeAI.

    Note that some GPUs are slow to warm up. In particular, when using an AMD
    card with the ROCm driver, you may have to wait for over a minute the first
    time you try to generate an image. Fortunately, after the warm-up period
    rendering will be fast.

11. Subsequently, to relaunch the script, be sure to run "conda activate
    invokeai", enter the `InvokeAI` directory, and then launch the invoke
    script. If you forget to activate the 'invokeai' environment, the script
    will fail with multiple `ModuleNotFound` errors.

## Creating an "install" version of InvokeAI

If you wish you can install InvokeAI and all its dependencies in the
runtime directory. This allows you to delete the source code
repository and eliminates the need to provide `--root_dir` at startup
time. Note that this method only works with the PIP method.

1. Follow the instructions for the PIP install, but in step #3 put the
   virtual environment into the runtime directory. For example, assuming the
   runtime directory lives in `~/Programs/invokeai`, you'd run:

    ```bash
    python -mvenv ~/Programs/invokeai
    ```

2. Now follow steps 4 to 6 in the PIP recipe, ending with the `pip install`
   step.

3. Run one additional step while you are in the source code repository
   directory: `pip install .` (note the dot at the end).

4. That's all! Now, whenever you activate the virtual environment,
   `invoke.py` will know where to look for the runtime directory without
   needing a `--root_dir` argument. In addition, you can now move or
   delete the source code repository entirely.

    (Don't move the runtime directory!)

## Updating to newer versions of the script
|
||||
|
||||
This distribution is changing rapidly. If you used the `git clone` method
|
||||
(step 5) to download the InvokeAI directory, then to update to the latest and
|
||||
greatest version, launch the Anaconda window, enter `InvokeAI` and type:
|
||||
|
||||
```bash
|
||||
git pull
|
||||
conda env update
|
||||
python scripts/configure_invokeai.py --skip-sd-weights #optional
|
||||
```
|
||||
|
||||
This will bring your local copy into sync with the remote one. The last step may
|
||||
be needed to take advantage of new features or released models. The
|
||||
`--skip-sd-weights` flag will prevent the script from prompting you to download
|
||||
the big Stable Diffusion weights files.
## Troubleshooting

Here are some common issues and their suggested solutions.

### Conda

#### Conda fails before completing `conda update`

The usual source of these errors is a package incompatibility. While we have
tried to minimize these, over time packages get updated and sometimes introduce
incompatibilities.

We suggest that you search
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).

You may also try to install the broken packages manually using PIP. To do this,
activate the `invokeai` environment, and run `pip install` with the name and
version of the package that is causing the incompatibility. For example:

```bash
pip install test-tube==0.7.5
```

You can keep doing this until all requirements are satisfied and the `invoke.py`
script runs without errors. Please report to
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
to work around the problem so that others can benefit from your investigation.

#### Create Conda Environment fails on MacOS

If `conda create` fails with an `lmdb` error, this is most likely caused by
Clang. Run `brew config` to see which Clang is installed on your Mac. If Clang
isn't installed, that's what is causing the error. Start by installing
additional XCode command line tools, followed by `brew install llvm`:

```bash
xcode-select --install
brew install llvm
```

If `brew config` shows that Clang is installed, update to the latest llvm and
try creating the environment again.

#### `configure_invokeai.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.

If the problem persists, a more extreme measure is to clear Conda's caches and
remove the `invokeai` environment:

```bash
conda deactivate
conda env remove -n invokeai
conda clean -a
conda update
```

This removes all cached library files, including ones that may have been
corrupted somehow. (This is not supposed to happen, but does anyway.)

#### `invoke.py` crashes at a later stage

If the CLI or the web interface had been working OK, but something unexpected
happens later on during the session, you've encountered a code bug that is
probably unrelated to an install issue. Please search
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).

#### My renders are running very slowly

You may have installed the wrong torch (machine learning) package, and the
system is running on CPU rather than the GPU. To check, look at the log messages
that appear when `invoke.py` is first starting up. One of the earlier lines
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
and on Macintoshes, it should say "mps". If instead the message says it is
running on "cpu", then you may need to install the correct torch library.

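A quick way to check which device torch will use is to query it from the
activated environment; on working CUDA and ROCm installs this prints `True`
(Macs use the separate `mps` backend instead):

```bash
python -c "import torch; print(torch.cuda.is_available())"
```
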
You may be able to fix this by installing a different torch library. Here are
|
||||
the magic incantations for Conda and PIP.
|
||||
|
||||
!!! todo "For CUDA systems"
|
||||
|
||||
- conda
|
||||
|
||||
```bash
|
||||
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
|
||||
```
|
||||
|
||||
- pip
|
||||
|
||||
```bash
|
||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
|
||||
```
|
||||
|
||||
!!! todo "For AMD systems"
|
||||
|
||||
- conda
|
||||
|
||||
```bash
|
||||
conda activate invokeai
|
||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||
```
|
||||
|
||||
- pip
|
||||
|
||||
```bash
|
||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||
```
|
||||
|
||||
More information and troubleshooting tips can be found at https://pytorch.org.
|
---
title: Installing PyPatchMatch
---

# :octicons-paintbrush-16: Installing PyPatchMatch

pypatchmatch is a Python module for inpainting images. It is not
needed to run InvokeAI, but it greatly improves the quality of
inpainting and outpainting and is recommended.

Unfortunately, it is a C++ optimized module and installation
can be somewhat challenging. This guide leads you through the steps.

## Windows

You're in luck! On Windows platforms PyPatchMatch will install
automatically with no extra intervention.

## Macintosh

PyPatchMatch is not currently supported, but the team is working on
it.

## Linux

Prior to installing PyPatchMatch, you need to take the following
steps:

### Debian Based Distros

1. Install the `build-essential` tools:

    ```
    sudo apt update
    sudo apt install build-essential
    ```

2. Install `opencv`:

    ```
    sudo apt install python3-opencv libopencv-dev
    ```

3. Fix the naming of the `opencv` package configuration file:

    ```
    cd /usr/lib/x86_64-linux-gnu/pkgconfig/
    ln -sf opencv4.pc opencv.pc
    ```

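    If you want to confirm that the symlink worked, you can ask `pkg-config`
    for the OpenCV build flags; seeing compiler flags rather than a "not found"
    error means the compile step below will be able to locate the library:

    ```bash
    pkg-config --cflags --libs opencv
    ```
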
4. Activate the environment you use for invokeai, either with
   `conda` or with a virtual environment.

5. Do a "develop" install of pypatchmatch:

    ```
    pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
    ```

6. Confirm that pypatchmatch is installed.
   At the command-line prompt enter `python`, and
   then at the `>>>` line type `from patchmatch import patch_match`.
   It should look like the following:

    ```
    Python 3.9.5 (default, Nov 23 2021, 15:27:38)
    [GCC 9.3.0] on linux
    Type "help", "copyright", "credits" or "license" for more information.
    >>> from patchmatch import patch_match
    Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
    rm -rf build/obj libpatchmatch.so
    mkdir: created directory 'build/obj'
    mkdir: created directory 'build/obj/csrc/'
    [dep] csrc/masked_image.cpp ...
    [dep] csrc/nnf.cpp ...
    [dep] csrc/inpaint.cpp ...
    [dep] csrc/pyinterface.cpp ...
    [CC] csrc/pyinterface.cpp ...
    [CC] csrc/inpaint.cpp ...
    [CC] csrc/nnf.cpp ...
    [CC] csrc/masked_image.cpp ...
    [link] libpatchmatch.so ...
    ```

### Arch Based Distros

1. Install the `base-devel` package:

    ```
    sudo pacman -Syu
    sudo pacman -S --needed base-devel
    ```

2. Install `opencv`:

    ```
    sudo pacman -S opencv
    ```

    or, for CUDA support:

    ```
    sudo pacman -S opencv-cuda
    ```

3. Fix the naming of the `opencv` package configuration file:

    ```
    cd /usr/lib/pkgconfig/
    ln -sf opencv4.pc opencv.pc
    ```

**Next, follow steps 4-6 from the Debian section above.**

If you see no errors, then you're ready to go!

---
title: build binary installers
---

# :simple-buildkite: How to build "binary" installers (InvokeAI-mac/windows/linux_on_*.zip)

## 1. Ensure `installers/requirements.in` is correct

Make sure it is correct and up to date on the branch to be installed.

## <a name="step-2"></a> 2. Run `pip-compile` on each platform.

On each target platform, in the branch that is to be installed, and
inside the InvokeAI git root folder, run the following commands:

```commandline
conda activate invokeai # or however you activate python
pip install pip-tools
pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/<reqsfile>.txt binary_installer/requirements.in
```

where `<reqsfile>.txt` is whichever of

```commandline
py3.10-darwin-arm64-mps-reqs.txt
py3.10-darwin-x86_64-reqs.txt
py3.10-linux-x86_64-cuda-reqs.txt
py3.10-windows-x86_64-cuda-reqs.txt
```

matches the current OS and architecture.

> There is no way to cross-compile these. They must be done on a system matching the target OS and arch.

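For instance, on a Linux x86_64 machine with CUDA, the concrete command is the
template above with the matching filename substituted in:

```commandline
pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/py3.10-linux-x86_64-cuda-reqs.txt binary_installer/requirements.in
```
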
## <a name="step-3"></a> 3. Set github repository and branch

Once all reqs files have been collected and committed **to the branch
to be installed**, edit `binary_installer/install.sh.in` and
`binary_installer/install.bat.in` so that `RELEASE_URL` and
`RELEASE_SOURCEBALL` point to the github repo and branch that is to be
installed.

For example, to install the `main` branch of `InvokeAI`, they should be
set as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

Or, to install the `damians-cool-feature` branch of `damian0815`, set them
as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/damian0815/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/damian0815/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

The branch and repo specified here **must** contain the correct reqs
files. The installer zip files **do not** contain requirements files;
they are pulled from the specified branch during the installation
process.

## 4. Create zip files.

cd into the `installers/` folder and run
`./create_installers.sh`. This will create
`InvokeAI-mac_on_<branch>.zip`,
`InvokeAI-windows_on_<branch>.zip` and
`InvokeAI-linux_on_<branch>.zip`. These files can be distributed to end users.

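In shell form, assuming you start from the root of the InvokeAI checkout:

```bash
cd installers
./create_installers.sh
```
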
These zips will continue to function as installers for all future
pushes to those branches, as long as necessary changes to
`requirements.in` are propagated in a timely manner to the
`py3.10-*-reqs.txt` files using pip-compile as outlined in
[step 2](#step-2).

To actually install, users should unzip the appropriate zip file into an empty
folder and run `install.sh` on macOS/Linux or `install.bat` on
Windows.

---

There are three ways to install weights files:

1. During InvokeAI installation, the `configure_invokeai.py` script can download
   them for you.

2. You can use the command-line interface (CLI) to import, configure and modify
   new models files.

3. You can download the files manually and add the appropriate entries to
   `models.yaml`.

### Installation via `configure_invokeai.py`

This is the most automatic way. Run `scripts/configure_invokeai.py` from the
console. It will ask you to select which models to download and lead you through
the steps of setting up a Hugging Face account if you haven't done so already.

To start, run `python scripts/configure_invokeai.py` from within the InvokeAI
directory:

!!! example ""

        The original Stable Diffusion version 1.4 weight file (4.27 GB)
        Download? [n] n
        [4] waifu-diffusion-1.3:
            Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
        Download? [n] y
        [5] ft-mse-improved-autoencoder-840000:
            StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)

Type a bit of the path name and hit ++tab++ in order to get a choice of
possible completions.

!!! tip "on Windows, you can drag model files onto the command-line"

    Once you have typed in `!import_model `, you can drag the model `.ckpt` file
    onto the command-line to insert the model path. This way, you don't need to
    type it or copy/paste.

4. Follow the wizard's instructions to complete installation as shown in the
   example here:

| field | description |
| ----- | ----------- |
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description | Any description that you want to add to the model to remind you what it is. |
| weights | Relative path to the .ckpt weights file for this model. |
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `configure_invokeai.py` script. |
| vae | If you want to add a VAE file to the model, then enter its path here. |
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |

---
title: Installing with Docker
---

# :fontawesome-brands-docker: Docker

!!! warning "For end users"

    We highly recommend installing InvokeAI locally using [these instructions](index.md)

!!! tip "For developers"

For general use, install locally to leverage your machine's GPU.

!!! tip "For running on a cloud instance/service"

    Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below

## Why containers?

They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
... for development purposes it's fine. Once you're done with development tasks on your
laptop you can build for the target platform and architecture and deploy to
another environment with NVIDIA GPUs on-premises or in the cloud.

## Installation in a Linux container (desktop)

### Prerequisites

Some suggestions of variables you may want to change besides the Token:

<figure markdown>

| Environment-Variable | Default value | Description |
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
| `REPOSITORY_NAME` | The basename of the repo folder | This name will be used as the container repository/image name |
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker volume where model files will be stored |
| `ARCH` | arch of the build machine | Can be changed if you want to build the image for another arch |
| `INVOKEAI_TAG` | latest | The container repository / tag which will be used |
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | The requirements file to use (from `environments-and-requirements`) |
| `CONTAINER_FLAVOR` | cuda | The flavor of the image, which can be changed if you build e.g. with the AMD requirements file. |
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | The Dockerfile which should be built; handy for development |

</figure>

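These variables can be set in the shell before kicking off a build; a minimal
sketch, assuming the build entry point is `docker-build/build.sh` and using a
placeholder token:

```bash
export HUGGINGFACE_TOKEN=hf_xxxxxxxxxxxxxxxx   # placeholder: substitute your own token
export PIP_REQUIREMENTS=requirements-lin-cuda.txt
./docker-build/build.sh
```
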
```bash
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
```

This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.

Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)

---

## Running the container on your GPU

If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
environment variable to enable GPU usage and have the process run much faster:

```bash
GPU_FLAGS=all ./docker-build/run.sh
```

This passes `--gpus all` to docker and uses the GPU.

If you don't have a GPU (or your host is not yet set up to use it) you will see a message like this:

`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`

You can use the full set of GPU combinations documented here:

https://docs.docker.com/config/containers/resource_constraints/#gpu

For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to choose a specific device identified by a UUID.

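To list the UUIDs of the GPUs in your system, you can ask the NVIDIA driver
directly:

```bash
nvidia-smi -L
```
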
## Running InvokeAI in the cloud with Docker

We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).

An advantage of this method is that it does not need any local setup or additional dependencies.

See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.

### Prerequisites

- a `docker` runtime
- `make` (optional but helps for convenience)
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation

Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.

### Building and running the image locally

1. Clone this repo and `cd docker-build`
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
    - `make configure` (This does *not* require a GPU-capable system)
    - this will create a local cache of models and configs (a.k.a the _runtime dir_)
    - enter your Huggingface token when prompted
1. `make web`
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!

To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.

#### Building and running without `make`

(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).

!!! example "Build the image and configure the runtime directory"

    ```Shell
    cd docker-build

    DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..

    docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
    ```

!!! example "Run the web server"

    ```Shell
    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
    ```

    Access the Web UI at http://localhost:9090

!!! example "Run the InvokeAI interactive CLI"

    ```Shell
    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
    ```

### Running the image in the cloud

This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):

1. build this image either in the cloud (you'll need to pull the repo), or locally
1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub)
1. `docker pull` it on your cloud instance
1. configure the runtime directory as per the above example, using the `docker run ... configure_invokeai.py` command
1. use either one of the `docker run` commands above, substituting the image name for your own image.

To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.

The template's `README` provides ample detail, but at a high level, the process is as follows:

1. create a pod using this Docker image
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
1. Run the pod with `sleep infinity` as the Docker command
1. Use Runpod basic SSH to connect to the pod, and run the `python scripts/configure_invokeai.py` script
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!

Running on other cloud providers such as Vast.ai will likely work in a similar fashion.

---

!!! warning "Deprecated"

    From here on you will find the previous Docker docs, which will still ...

If you're on a **Linux container** the `invoke` script is **automatically
started** and the output dir set to the Docker volume you created earlier.

If you're **directly on macOS follow these startup instructions**.
With the Conda environment activated (`conda activate ldm`), run the interactive
interface that combines the functionality of the original scripts `txt2img` and
`img2img`:
Use the more accurate but VRAM-intensive full precision math because
half-precision requires autocast and won't work.
By default the images are saved in `outputs/img-samples/`.

```Shell
# Launch the interactive CLI in full-precision mode (the exact invocation is
# reconstructed from the surrounding text)
python3 scripts/invoke.py --full_precision
```

### Text to Image

For quick (but bad) image results test with 5 steps (default 50) and 1 sample
image. This will let you know that everything is set up correctly.
Then increase steps to 100 or more for good (but slower) results.
The prompt can be in quotes or not.

```Shell
# An illustrative quick test at 5 steps
invoke> banana sushi -s5
```

You'll need to experiment to see if face restoration is making it better or
worse for your specific prompt.

If you're on a container the output is set to the Docker volume. You can copy it
wherever you want.
You can download it from the Docker Desktop app, Volumes, my-vol, data.
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
`*.png` so you'll need to specify the image file name.

---
title: InvokeAI Binary Installer
---

The InvokeAI binary installer is a shell script that will install InvokeAI onto a stock
computer running recent versions of Linux, MacOSX or Windows. It will leave you
with a version that runs a stable version of InvokeAI. When a new version of
InvokeAI is released, you will download and reinstall the new version.

If you wish to tinker with unreleased versions of InvokeAI that introduce
potentially unstable new features, you should consider using the
[source installer](INSTALL_SOURCE.md) or one of the
[manual install](../020_INSTALL_MANUAL.md) methods.

**Important Caveats**

- This script does not support AMD GPUs. For Linux AMD support,
  please use the manual or source code installer methods.

1. Download the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
   InvokeAI's installer for your platform. Look for a file named `InvokeAI-binary-<your platform>.zip`

2. Place the downloaded package someplace where you have plenty of HDD space,
   and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)

---
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

# THIS NEEDS TO BE FLESHED OUT

## Introduction

We have a [Jupyter
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode
(with the appropriate extensions installed)/Jupyter/JupyterLab and
start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

## Walkthrough

## Updating to newer versions

### Updating the stable version

### Updating to the development version

## Troubleshooting

---
title: Manual Installation
---

!!! warning "This is for advanced Users"

    who are already experienced with using conda or pip

## Introduction

    ```bash
    dir
    ```

!!! warning "Do not try to run conda directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."

6. Create the conda environment:

    ```bash
    conda env create -f environment.yml
    ```

process for this is described in [here](INSTALLING_MODELS.md).

    ```bash
    python scripts/configure_invokeai.py
    ```

    The script `configure_invokeai.py` will interactively guide you through the
    process of downloading and installing the weights files needed for InvokeAI.
    Note that the main Stable Diffusion weights file is protected by a license
    agreement that you have to agree to. The script will list the steps you need
    to take to create an account on the official site that hosts the weights
    files, accept the agreement, and provide an access token that allows
    InvokeAI to legally download and install the weights files.

To update to the latest and greatest version, launch the Anaconda window,
enter the `InvokeAI` directory and type:

```bash
git pull
conda env update
python scripts/configure_invokeai.py --no-interactive #optional
```

This will bring your local copy into sync with the remote one. The last step may
be needed to take advantage of new features or newly released models.

If `brew config` shows that Clang is installed, update to the latest llvm and
try creating the environment again.

#### `configure_invokeai.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.

---
title: Source Installer
---

# The InvokeAI Source Installer

## Introduction

The source installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md).

Before you begin, make sure that you meet the
[hardware requirements](index.md#Hardware_Requirements) and have the appropriate
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

## Walk through

Though there are multiple steps, there really is only one click involved to kick
off the process.

1. The source installer is distributed in ZIP files. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

    - invokeAI-src-installer-mac.zip
    - invokeAI-src-installer-windows.zip
    - invokeAI-src-installer-linux.zip

   Download the one that is appropriate for your operating system.

2. Unpack the zip file into a directory that has at least 18G of free space. Do
   _not_ unpack into a directory that has an earlier version of InvokeAI.

   This will create a new directory named "InvokeAI". This example shows how
   this would look using the `unzip` command-line tool, but you may use any
   graphical or command-line Zip extractor:

    ```cmd
    C:\Documents\Linco> unzip invokeAI-windows.zip
    Archive: C: \Linco\Downloads\invokeAI-linux.zip
       creating: invokeAI\
      inflating: invokeAI\install.bat
      inflating: invokeAI\readme.txt
    ```

3. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

4. Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd invokeAI
    C:\Documents\Linco\invokeAI> install.bat
    ```

5. Sit back and let the install script work. It will install various binary
   requirements including Conda, Git and Python, then download the current
   InvokeAI code and install it along with its dependencies.

6. After installation completes, the installer will launch a script called
   `preload_models.py`, which will guide you through the first-time process of
   selecting one or more Stable Diffusion model weights files, downloading and
   configuring them.

   Note that the main Stable Diffusion weights file is protected by a license
   agreement that you must agree to in order to use. The script will list the
   steps you need to take to create an account on the official site that hosts
   the weights files, accept the agreement, and provide an access token that
   allows InvokeAI to legally download and install the weights files.

   If you have already downloaded the weights file(s) for another Stable
   Diffusion distribution, you may skip this step (by selecting "skip" when
   prompted) and configure InvokeAI to use the previously-downloaded files. The
   process for this is described in [Installing Models](INSTALLING_MODELS.md).

7. The script will now exit and you'll be ready to generate some images. The
   invokeAI directory will contain numerous files. Look for a shell script
   named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
   by double-clicking it or typing its name at the command-line:

    ```cmd
    C:\Documents\Linco> cd invokeAI
    C:\Documents\Linco\invokeAI> invoke.bat
    ```

The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
the command-line interface, or (2) the web GUI. If you start the latter, you can
load the user interface by pointing your browser at http://localhost:9090.

The `invoke` script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a command-line
interface in which you can run python commands directly, access developer tools,
and launch InvokeAI with customized options. To do the latter, you would launch
the script `scripts/invoke.py` as shown in this example:

```cmd
python scripts/invoke.py --web --max_load_models=3 \
    --model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
```

These options are described in detail in the
[Command-Line Interface](../features/CLI.md) documentation.

## Updating to newer versions

This section describes how to update InvokeAI to new versions of the software.

### Updating the stable version

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `preload_models` script to download any updated model
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

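A typical update run looks something like this, assuming the scripts sit at
the top of the invokeAI directory where the installer left them:

```bash
cd invokeAI
./update.sh   # on Windows: update.bat
```
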
### Updating to the development version

There may be times that there is a feature in the `development` branch of
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
corrects an annoying bug. To do this, you will use the developer's console.

From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
`invoke.bat` (Windows) and select option (3) to open the developers console.
Then run the following command to get the `development` branch:

```bash
git checkout development
git pull
conda env update
```

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `preload_models.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/preload_models.py`.

## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.

---
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

## Introduction

We have a [Jupyter
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode
(with the appropriate extensions installed)/Jupyter/JupyterLab and
start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

## Running Online On Google Colaboratory

[Open In Colab](https://colab.research.google.com/github/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)

## Running Locally (Cloning)

1. Install the Jupyter Notebook python library (one-time):

        pip install jupyter

2. Clone the InvokeAI repository:

        git clone https://github.com/invoke-ai/InvokeAI.git
        cd InvokeAI

3. Create a virtual environment using conda:

        conda create -n invoke jupyter

4. Activate the environment and start the Jupyter notebook:

        conda activate invoke
        jupyter notebook

---
title: Source Installer
---

# The InvokeAI Source Installer

## Introduction

The source installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.

Before you begin, make sure that you meet the
[hardware requirements](../../index.md#hardware-requirements) and have the appropriate
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

## Walk through

Though there are multiple steps, there really is only one click involved to kick
off the process.

1. The source installer is distributed in ZIP files. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

    - [invokeAI-src-installer-2.2.3-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-mac.zip)
    - [invokeAI-src-installer-2.2.3-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-windows.zip)
    - [invokeAI-src-installer-2.2.3-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-linux.zip)

   Download the one that is appropriate for your operating system.

2. Unpack the zip file into a directory that has at least 18G of free space. Do
   _not_ unpack into a directory that has an earlier version of InvokeAI.

   This will create a new directory named "InvokeAI". This example shows how
   this would look using the `unzip` command-line tool, but you may use any
   graphical or command-line Zip extractor:

    ```cmd
    C:\Documents\Linco> unzip invokeAI-windows.zip
    Archive: C: \Linco\Downloads\invokeAI-linux.zip
       creating: invokeAI\
      inflating: invokeAI\install.bat
      inflating: invokeAI\readme.txt
    ```

3. If you are a macOS user, you may need to install the Xcode command line tools.
   These are a set of tools that are needed to run certain applications in a Terminal,
   including InvokeAI. This package is provided directly by Apple.

   To install, open a terminal window and run `xcode-select --install`. You will get
   a macOS system popup guiding you through the install. If you already have them
   installed, you will instead see some output in the Terminal advising you that the
   tools are already installed.

   More information can be found here:
   https://www.freecodecamp.org/news/install-xcode-command-line-tools/

4. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

5. Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd invokeAI
    C:\Documents\Linco\invokeAI> install.bat
    ```

6. Sit back and let the install script work. It will install various binary
   requirements including Conda, Git and Python, then download the current
   InvokeAI code and install it along with its dependencies.

   Be aware that some of the library download and install steps take a long time.
   In particular, the `pytorch` package is quite large and often appears to get
   "stuck" at 99.9%. Similarly, the `pip installing requirements` step may
   appear to hang. Have patience and the installation step will eventually
   resume. However, there are occasions when the library install does
   legitimately get stuck. If you have been waiting for more than ten minutes
   and nothing is happening, you can interrupt the script with ^C. You may restart
   it and it will pick up where it left off.

7. After installation completes, the installer will launch a script called
   `configure_invokeai.py`, which will guide you through the first-time process of
   selecting one or more Stable Diffusion model weights files, downloading and
   configuring them.

   Note that the main Stable Diffusion weights file is protected by a license
   agreement that you must agree to in order to use. The script will list the
   steps you need to take to create an account on the official site that hosts
   the weights files, accept the agreement, and provide an access token that
   allows InvokeAI to legally download and install the weights files.

   If you have already downloaded the weights file(s) for another Stable
   Diffusion distribution, you may skip this step (by selecting "skip" when
   prompted) and configure InvokeAI to use the previously-downloaded files. The
   process for this is described in [Installing Models](../050_INSTALLING_MODELS.md).

8. The script will now exit and you'll be ready to generate some images. The
   invokeAI directory will contain numerous files. Look for a shell script
   named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
   by double-clicking it or typing its name at the command-line:

    ```cmd
    C:\Documents\Linco> cd invokeAI
    C:\Documents\Linco\invokeAI> invoke.bat
    ```

The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
the command-line interface, or (2) the web GUI. If you start the latter, you can
load the user interface by pointing your browser at http://localhost:9090.

The `invoke` script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a command-line
interface in which you can run python commands directly, access developer tools,
and launch InvokeAI with customized options. To do the latter, you would launch
the script `scripts/invoke.py` as shown in this example:

```cmd
python scripts/invoke.py --web --max_load_models=3 \
    --model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
```

These options are described in detail in the
[Command-Line Interface](../../features/CLI.md) documentation.

## Troubleshooting

_Package dependency conflicts_ If you have previously installed
InvokeAI or another Stable Diffusion package, the installer may
occasionally pick up outdated libraries and either the installer or
`invoke` will fail with complaints about library conflicts. There are
two steps you can take to clear this problem. Both of these are done
from within the "developer's console", which you can get to by
launching `invoke.sh` (or `invoke.bat`) and selecting launch option
#3:

1. Remove the previous `invokeai` environment completely. From within
   the developer's console, give the command `conda env remove -n
   invokeai`. This will delete previous files installed by `invoke`.

   Then exit from the developer's console and launch the script
   `update.sh` (or `update.bat`). This will download the most recent
   InvokeAI (including bug fixes) and reinstall the environment.
   You should then be able to run `invoke.sh`/`invoke.bat`.

2. If this doesn't work, you can try cleaning your system's conda
   cache. This is slightly more extreme, but won't interfere with
   any other python-based programs installed on your computer.
   From the developer's console, run the command `conda clean -a`
   and answer "yes" to all prompts.

   After this is done, run `update.sh` and try again as before.

_"Corrupted configuration file."_ Everything seems to install ok, but
`invoke` complains of a corrupted configuration file and calls
`configure_invokeai.py` to fix it, but this doesn't fix the problem.

This issue is often caused by a misconfigured configuration directive
in the `.invokeai` initialization file that contains startup settings.
This can be corrected by fixing the offending line.

First find `.invokeai`. It is a small text file located in your home
directory, `~/.invokeai` on Mac and Linux systems, and `C:\Users\*your
name*\.invokeai` on Windows systems. Open it with a text editor
(e.g. Notepad on Windows, TextEdit on Macs, or `nano` on Linux)
and look for the lines starting with `--root` and `--outdir`.

An example is here:

```cmd
--root="/home/lstein/invokeai"
--outdir="/home/lstein/invokeai/outputs"
```

There should not be whitespace before or after the directory paths,
and the paths should not end with slashes:

```cmd
--root="/home/lstein/invokeai " # wrong! no whitespace here
--root="/home\lstein\invokeai\" # wrong! shouldn't end in a slash
```

Fix the problem with your text editor and save as a **plain text**
file. This should clear the issue.

_If none of these maneuvers fixes the problem_ then please report the
problem to the [InvokeAI
Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive assistance.

## Updating to newer versions

This section describes how to update InvokeAI to new versions of the software.

### Updating the stable version

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `configure_invokeai` script to download any updated model
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `configure_invokeai.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/configure_invokeai.py`.

## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.

---
title: Overview
---

We offer several ways to install InvokeAI, each one suited to your
experience and preferences.

1. [InvokeAI installer](INSTALL_INVOKE.md)

   This is an installer script that installs InvokeAI and all the
   third party libraries it depends on. When a new version of
   InvokeAI is released, you will download and reinstall the new
   version.

   This installer is designed for people who want the system to "just
   work", don't have an interest in tinkering with it, and do not
   care about upgrading to unreleased experimental features.

   **Important Caveats**

   - This script does not support AMD GPUs. For Linux AMD support,
     please use the manual or source code installer methods.
   - This script has difficulty on some Macintosh machines
     that have previously been used for Python development due to
     conflicting development tools versions. Mac developers may wish
     to try the source code installer or one of the manual methods instead.

2. [Source code installer](INSTALL_SOURCE.md)

   This is a script that will install InvokeAI and all its essential
   third party libraries. In contrast to the previous installer, it
   includes access to a "developer console" which will allow you to
   access experimental features on the development branch.

   This method is recommended for individuals who wish to stay
   on the cutting edge of InvokeAI development and are not afraid
   of occasional breakage.

3. [Manual Installation](INSTALL_MANUAL.md)

   In this method you will manually run the commands needed to install
   InvokeAI and its dependencies. We offer two recipes: one suited to
   those who prefer the `conda` tool, and one suited to those who prefer
   `pip` and Python virtual environments. In our hands the pip install
   is faster and more reliable, but your mileage may vary.

   This method is recommended for users who have previously used `conda`
   or `pip` in the past, developers, and anyone who wishes to remain on
   the cutting edge of future InvokeAI development and is willing to put
   up with occasional glitches and breakage.

4. [Docker Installation](INSTALL_DOCKER.md)

   We also offer a method for creating Docker containers containing
   InvokeAI and its dependencies. This method is recommended for
   individuals with experience with Docker containers and who understand
   the pluses and minuses of a container-based install.

5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)

   This method is suitable for running InvokeAI on a Google Colab
   account. It is recommended for individuals who have previously
   worked on the Colab and are comfortable with the Jupyter notebook
   environment.

@@ -69,7 +69,7 @@ title: Manual Installation, Linux
machine-learning models:

```bash
(invokeai) ~/InvokeAI$ python3 scripts/configure_invokeai.py
(invokeai) ~/InvokeAI$ python3 scripts/preload_models.py
```

!!! note
@@ -79,7 +79,7 @@ title: Manual Installation, Linux
and obtaining an access token for downloading. It will then download and
install the weights files for you.

Please look [here](../INSTALL_MANUAL.md) for a manual process for doing
Please look [here](INSTALLING_MODELS.md) for a manual process for doing
the same thing.

7. Start generating images!
@@ -112,7 +112,7 @@ title: Manual Installation, Linux
To use an alternative model you may invoke the `!switch` command in
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
either the CLI or the Web UI. See [Command Line
Client](../../features/CLI.md#model-selection-and-importation). The
Client](../features/CLI.md#model-selection-and-importation). The
model names are defined in `configs/models.yaml`.

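In practice, the two mechanisms described above look like this (`stable-diffusion-1.4` is only an illustrative model name; use whichever names your `configs/models.yaml` defines):

```bash
# Within the interactive CLI, switch models on the fly:
invoke> !switch stable-diffusion-1.4

# Or choose the model at launch time (works for both the CLI and the Web UI):
python scripts/invoke.py --model stable-diffusion-1.4
```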
8. Subsequently, to relaunch the script, be sure to run "conda activate

@@ -111,7 +111,7 @@ will do our best to help.

!!! todo "Download the model weight files"

The `configure_invokeai.py` script downloads and installs the model weight
The `preload_models.py` script downloads and installs the model weight
files for you. It will lead you through the process of getting a Hugging Face
account, accepting the Stable Diffusion model weight license agreement, and
creating a download token:
@@ -119,7 +119,7 @@ will do our best to help.
```bash
# This will take some time, depending on the speed of your internet connection
# and will consume about 10GB of space
python scripts/configure_invokeai.py
python scripts/preload_models.py
```

!!! todo "Run InvokeAI!"
@@ -150,7 +150,7 @@ will do our best to help.
To use an alternative model you may invoke the `!switch` command in
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
either the CLI or the Web UI. See [Command Line
Client](../../features/CLI.md#model-selection-and-importation). The
Client](../features/CLI.md#model-selection-and-importation). The
model names are defined in `configs/models.yaml`.

---
@@ -220,8 +220,8 @@ There are several causes of these errors:
with "(invokeai)" then you activated it. If it begins with "(base)" or
something else you haven't.

2. You might've run `./scripts/configure_invokeai.py` or `./scripts/invoke.py`
instead of `python ./scripts/configure_invokeai.py` or
2. You might've run `./scripts/preload_models.py` or `./scripts/invoke.py`
instead of `python ./scripts/preload_models.py` or
`python ./scripts/invoke.py`. The cause of this error is long so it's below.

<!-- I could not find out where the error is, otherwise would have marked it as a footnote -->
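A minimal sanity check covering both causes, assuming the conda environment is named `invokeai` as it is throughout these docs:

```bash
# Cause 1: activate the environment and confirm the prompt prefix changes
conda activate invokeai
# the shell prompt should now begin with (invokeai), not (base)

# Cause 2: always launch the scripts through the Python interpreter
python ./scripts/preload_models.py
python ./scripts/invoke.py
```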
@@ -359,7 +359,7 @@ python ./scripts/txt2img.py \
### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'

```bash
python scripts/configure_invokeai.py
python scripts/preload_models.py
```

---
@@ -7,7 +7,7 @@ title: Manual Installation, Windows
## **Notebook install (semi-automated)**

We have a
[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
with cell-by-cell installation steps. It will download the code in this repo as
one of the steps, so instead of cloning this repo, simply download the notebook
from the link above and load it up in VSCode (with the appropriate extensions
@@ -65,7 +65,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:

```bash
python scripts/configure_invokeai.py
python scripts/preload_models.py
```

!!! note
@@ -75,7 +75,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
obtaining an access token for downloading. It will then download and install the
weights files for you.

Please look [here](../INSTALL_MANUAL.md) for a manual process for doing the
Please look [here](INSTALLING_MODELS.md) for a manual process for doing the
same thing.

8. Start generating images!
@@ -108,7 +108,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
To use an alternative model you may invoke the `!switch` command in
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
either the CLI or the Web UI. See [Command Line
Client](../../features/CLI.md#model-selection-and-importation). The
Client](../features/CLI.md#model-selection-and-importation). The
model names are defined in `configs/models.yaml`.

9. Subsequently, to relaunch the script, first activate the Anaconda
@@ -3,10 +3,10 @@ info:
title: Stable Diffusion
description: |-
TODO: Description Here


Some useful links:
- [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)


license:
name: MIT License
url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
@@ -36,7 +36,7 @@ paths:
description: successful operation
content:
image/png:
schema:
schema:
type: string
format: binary
'404':
@@ -66,7 +66,7 @@ paths:
description: successful operation
content:
image/png:
schema:
schema:
type: string
format: binary
'404':
@@ -15,17 +15,16 @@ We thank them for all of their time and hard work.

## **Current core team**

* @lstein (Lincoln Stein) - Co-maintainer
* @blessedcoolant - Co-maintainer
* @hipsterusername (Kent Keirsey) - Product Manager
* @psychedelicious - Web Team Leader
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* @damian0815 - Attention Systems and Gameplay Engineer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - general gadfly and resident (self-appointed) know-it-all
* @keturn - Lead for Diffusers port
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
* lstein (Lincoln Stein) - Co-maintainer
* blessedcoolant - Co-maintainer
* hipsterusername (Kent Keirsey) - Product Manager
* psychedelicious - Web Team Leader
* Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* damian0815 - Attention Systems and Gameplay Engineer
* mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* tildebyte - general gadfly and resident (self-appointed) know-it-all
* keturn - Lead for Diffusers port

## **Contributions by**

@@ -75,7 +74,6 @@ We thank them for all of their time and hard work.
- [Kent Keirsey](https://github.com/hipsterusername)
- [psychedelicious](https://github.com/psychedelicious)
- [damian0815](https://github.com/damian0815)
- [Eugene Brodsky](https://github.com/ebr)

## **Original CompVis Authors**

@@ -1,5 +1,3 @@
mkdocs
mkdocs-material>=8, <9
mkdocs-git-revision-date-localized-plugin
mkdocs-redirects==1.2.0
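These pins drive the documentation build. For reference, a local docs preview under them looks roughly like this (the requirements file path is an assumption; point `pip` at wherever this file lives in your checkout):

```bash
# Install the documentation toolchain, then serve a live-reloading preview
pip install -r docs/requirements-mkdocs.txt   # path assumed
mkdocs serve                                  # serves the docs at http://127.0.0.1:8000
```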
@@ -30,6 +30,7 @@ dependencies:
- torchvision
- transformers=4.21.3
- pip:
- dependency_injector==4.40.0
- getpass_asterisk
- omegaconf==2.1.1
- picklescan
@@ -41,5 +42,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
- -e .

@@ -10,6 +10,7 @@ dependencies:
- pip:
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
- albumentations==0.4.3
- dependency_injector==4.40.0
- diffusers==0.6.0
- einops==0.3.0
- eventlet
@@ -43,5 +44,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
- -e .

@@ -13,6 +13,7 @@ dependencies:
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- dependency_injector==4.40.0
- diffusers==0.6.0
- einops==0.3.0
- eventlet
@@ -42,5 +43,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
- -e .

@@ -59,7 +59,7 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
- -e .
variables:
PYTORCH_ENABLE_MPS_FALLBACK: 1

@@ -13,6 +13,8 @@ dependencies:
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- basicsr==1.4.1
- dependency_injector==4.40.0
- diffusers==0.6.0
- einops==0.3.0
- eventlet
@@ -42,5 +44,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
- -e .
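All of the conda environment files above are consumed the same way. A sketch of the usual workflow, assuming the platform-specific file has been copied or linked to `environment.yml` and the environment is named `invokeai`:

```bash
# Create the environment from the chosen platform file, then activate it
conda env create -f environment.yml
conda activate invokeai

# After pulling changes to the environment file, update in place
conda env update -f environment.yml --prune
```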
@@ -1,23 +1,22 @@
# pip will resolve the version which matches torch
albumentations
diffusers==0.10.*
dependency_injector==4.40.0
diffusers
einops
eventlet
facexlib
flask==2.1.3
flask_cors==3.0.10
flask_socketio==5.3.0
flaskwebgui==1.0.3
flaskwebgui==0.3.7
getpass_asterisk
gfpgan==1.3.8
huggingface-hub
imageio
imageio-ffmpeg
kornia
numpy==1.23.*
numpy
omegaconf
opencv-python
picklescan
pillow
pip>=22
pudb
@@ -32,9 +31,11 @@ taming-transformers-rom1504
test-tube>=0.7.5
torch-fidelity
torchmetrics
transformers==4.25.*
https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch
# https://github.com/invoke-ai/GFPGAN/archive/refs/heads/master.zip#egg=gfpgan
https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg
transformers==4.21.*
picklescan
git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan ; platform_system == 'Windows'
git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan ; platform_system != 'Windows'
git+https://github.com/openai/CLIP.git@main#egg=clip
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
@@ -1,5 +1,2 @@
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
-r environments-and-requirements/requirements-base.txt
torch
torchvision
-e .

@@ -1,6 +1,7 @@
-r environments-and-requirements/requirements-base.txt
# Get hardware-appropriate torch/torchvision
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
basicsr==1.4.1
torch==1.12.1
torchvision==0.13.1
-e .
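The pip route wires these same files together. A hand-rolled equivalent of the CUDA 11.6 variant above, as a sketch (the virtual environment name `.venv` is an assumption):

```bash
# Create an isolated virtual environment and activate it
python -m venv .venv
source .venv/bin/activate

# Install the pinned base requirements plus CUDA 11.6 torch wheels
pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
    torch==1.12.1 torchvision==0.13.1 \
    -r environments-and-requirements/requirements-base.txt

# Finally, install InvokeAI itself, as the trailing `-e .` line does
pip install -e .
```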
@@ -1,13 +1,6 @@
module.exports = {
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:react-hooks/recommended',
  ],
  extends: ['eslint:recommended', 'plugin:@typescript-eslint/recommended', 'plugin:react-hooks/recommended'],
  parser: '@typescript-eslint/parser',
  plugins: ['@typescript-eslint', 'eslint-plugin-react-hooks'],
  root: true,
  rules: {
    '@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
  },
};
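The rewrite above only reflows the `extends` array onto one line; the lint behavior is unchanged. A typical invocation against the frontend sources might be (the exact lint script wired into `package.json`, and the `src` directory layout, are assumptions; they are not shown in this diff):

```bash
# Run ESLint over the TypeScript/React sources using this config
cd frontend
npx eslint src --ext .ts,.tsx
```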
48 frontend/dist/assets/index-legacy-5c5a479d.js (vendored)
File diff suppressed because one or more lines are too long

1 frontend/dist/assets/index.0dadf5d0.css (vendored)
File diff suppressed because one or more lines are too long

1 frontend/dist/assets/index.c609c0c8.css (vendored, new file)
File diff suppressed because one or more lines are too long

625 frontend/dist/assets/index.ec2d89c6.js (vendored)
File diff suppressed because one or more lines are too long

623 frontend/dist/assets/index.faf4c870.js (vendored, new file)
File diff suppressed because one or more lines are too long

1 frontend/dist/assets/polyfills.1ff60148.js (vendored)
File diff suppressed because one or more lines are too long

33 frontend/dist/index.html (vendored)
@@ -1,23 +1,18 @@
<!DOCTYPE html>
<html lang="en">
<head>
<script type="module" crossorigin src="./assets/polyfills.1ff60148.js"></script>

<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.ec2d89c6.js"></script>
<link rel="stylesheet" href="./assets/index.0dadf5d0.css">
<script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
<script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
</head>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.faf4c870.js"></script>
<link rel="stylesheet" href="./assets/index.c609c0c8.css">
</head>

<body>
<div id="root"></div>

<script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
<script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-5c5a479d.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
</body>
</html>
<body>
<div id="root"></div>

</body>

</html>
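Everything under `frontend/dist/` is a Vite build artifact, which is why the hashed asset names above change in lockstep; these files are regenerated rather than edited by hand. Under the usual Vite workflow that is roughly (assuming yarn, which this frontend used at the time):

```bash
# Rebuild the frontend; regenerates frontend/dist/index.html and the hashed assets
cd frontend
yarn install
yarn build
```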
55 frontend/dist/locales/common/de.json (vendored)
@@ -1,55 +0,0 @@
{
"hotkeysLabel": "Hotkeys",
"themeLabel": "Thema",
"languagePickerLabel": "Sprachauswahl",
"reportBugLabel": "Fehler melden",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Einstellungen",
"darkTheme": "Dunkel",
"lightTheme": "Hell",
"greenTheme": "Grün",
"langEnglish": "Englisch",
"langRussian": "Russisch",
"langItalian": "Italienisch",
"langPortuguese": "Portugiesisch",
"langFrench": "Französich",
"langGerman": "Deutsch",
"langSpanish": "Spanisch",
"text2img": "Text zu Bild",
"img2img": "Bild zu Bild",
"unifiedCanvas": "Unified Canvas",
"nodes": "Knoten",
"nodesDesc": "Ein knotenbasiertes System, für die Erzeugung von Bildern, ist derzeit in der Entwicklung. Bleiben Sie gespannt auf Updates zu dieser fantastischen Funktion.",
"postProcessing": "Nachbearbeitung",
"postProcessDesc1": "InvokeAI bietet eine breite Palette von Nachbearbeitungsfunktionen. Bildhochskalierung und Gesichtsrekonstruktion sind bereits in der WebUI verfügbar. Sie können sie über das Menü Erweiterte Optionen der Reiter Text in Bild und Bild in Bild aufrufen. Sie können Bilder auch direkt bearbeiten, indem Sie die Schaltflächen für Bildaktionen oberhalb der aktuellen Bildanzeige oder im Viewer verwenden.",
"postProcessDesc2": "Eine spezielle Benutzeroberfläche wird in Kürze veröffentlicht, um erweiterte Nachbearbeitungs-Workflows zu erleichtern.",
"postProcessDesc3": "Die InvokeAI Kommandozeilen-Schnittstelle bietet verschiedene andere Funktionen, darunter Embiggen.",
"training": "Training",
"trainingDesc1": "Ein spezieller Arbeitsablauf zum Trainieren Ihrer eigenen Embeddings und Checkpoints mit Textual Inversion und Dreambooth über die Weboberfläche.",
"trainingDesc2": "InvokeAI unterstützt bereits das Training von benutzerdefinierten Embeddings mit Textual Inversion unter Verwendung des Hauptskripts.",
"upload": "Upload",
"close": "Schließen",
"load": "Laden",
"statusConnected": "Verbunden",
"statusDisconnected": "Getrennt",
"statusError": "Fehler",
"statusPreparing": "Vorbereiten",
"statusProcessingCanceled": "Verarbeitung abgebrochen",
"statusProcessingComplete": "Verarbeitung komplett",
"statusGenerating": "Generieren",
"statusGeneratingTextToImage": "Erzeugen von Text zu Bild",
"statusGeneratingImageToImage": "Erzeugen von Bild zu Bild",
"statusGeneratingInpainting": "Erzeuge Inpainting",
"statusGeneratingOutpainting": "Erzeuge Outpainting",
"statusGenerationComplete": "Generierung abgeschlossen",
"statusIterationComplete": "Iteration abgeschlossen",
"statusSavingImage": "Speichere Bild",
"statusRestoringFaces": "Gesichter restaurieren",
"statusRestoringFacesGFPGAN": "Gesichter restaurieren (GFPGAN)",
"statusRestoringFacesCodeFormer": "Gesichter restaurieren (CodeFormer)",
"statusUpscaling": "Hochskalierung",
"statusUpscalingESRGAN": "Hochskalierung (ESRGAN)",
"statusLoadingModel": "Laden des Modells",
"statusModelChanged": "Modell Geändert"
}

56 frontend/dist/locales/common/en-US.json (vendored)
@@ -1,56 +0,0 @@
{
"hotkeysLabel": "Hotkeys",
"themeLabel": "Theme",
"languagePickerLabel": "Language Picker",
"reportBugLabel": "Report Bug",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Settings",
"darkTheme": "Dark",
"lightTheme": "Light",
"greenTheme": "Green",
"langEnglish": "English",
"langRussian": "Russian",
"langItalian": "Italian",
"langBrPortuguese": "Portuguese (Brazilian)",
"langGerman": "German",
"langPortuguese": "Portuguese",
"langFrench": "French",
"langPolish": "Polish",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"nodes": "Nodes",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
"training": "Training",
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
"upload": "Upload",
"close": "Close",
"load": "Load",
"statusConnected": "Connected",
"statusDisconnected": "Disconnected",
"statusError": "Error",
"statusPreparing": "Preparing",
"statusProcessingCanceled": "Processing Canceled",
"statusProcessingComplete": "Processing Complete",
"statusGenerating": "Generating",
"statusGeneratingTextToImage": "Generating Text To Image",
"statusGeneratingImageToImage": "Generating Image To Image",
"statusGeneratingInpainting": "Generating Inpainting",
"statusGeneratingOutpainting": "Generating Outpainting",
"statusGenerationComplete": "Generation Complete",
"statusIterationComplete": "Iteration Complete",
"statusSavingImage": "Saving Image",
"statusRestoringFaces": "Restoring Faces",
"statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
"statusUpscaling": "Upscaling",
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
"statusLoadingModel": "Loading Model",
"statusModelChanged": "Model Changed"
}

58 frontend/dist/locales/common/en.json (vendored)
@@ -1,58 +0,0 @@
{
"hotkeysLabel": "Hotkeys",
"themeLabel": "Theme",
"languagePickerLabel": "Language Picker",
"reportBugLabel": "Report Bug",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Settings",
"darkTheme": "Dark",
"lightTheme": "Light",
"greenTheme": "Green",
"langEnglish": "English",
"langRussian": "Russian",
"langItalian": "Italian",
"langBrPortuguese": "Portuguese (Brazilian)",
"langGerman": "German",
"langPortuguese": "Portuguese",
"langFrench": "French",
"langPolish": "Polish",
"langSimplifiedChinese": "Simplified Chinese",
"langSpanish": "Spanish",
"text2img": "Text To Image",
"img2img": "Image To Image",
"unifiedCanvas": "Unified Canvas",
"nodes": "Nodes",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
"postProcessDesc1": "Invoke AI offers a wide variety of post processing features. Image Upscaling and Face Restoration are already available in the WebUI. You can access them from the Advanced Options menu of the Text To Image and Image To Image tabs. You can also process images directly, using the image action buttons above the current image display or in the viewer.",
"postProcessDesc2": "A dedicated UI will be released soon to facilitate more advanced post processing workflows.",
"postProcessDesc3": "The Invoke AI Command Line Interface offers various other features including Embiggen.",
"training": "Training",
"trainingDesc1": "A dedicated workflow for training your own embeddings and checkpoints using Textual Inversion and Dreambooth from the web interface.",
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
"upload": "Upload",
"close": "Close",
"load": "Load",
"statusConnected": "Connected",
"statusDisconnected": "Disconnected",
"statusError": "Error",
"statusPreparing": "Preparing",
"statusProcessingCanceled": "Processing Canceled",
"statusProcessingComplete": "Processing Complete",
"statusGenerating": "Generating",
"statusGeneratingTextToImage": "Generating Text To Image",
"statusGeneratingImageToImage": "Generating Image To Image",
"statusGeneratingInpainting": "Generating Inpainting",
"statusGeneratingOutpainting": "Generating Outpainting",
"statusGenerationComplete": "Generation Complete",
"statusIterationComplete": "Iteration Complete",
"statusSavingImage": "Saving Image",
"statusRestoringFaces": "Restoring Faces",
"statusRestoringFacesGFPGAN": "Restoring Faces (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restoring Faces (CodeFormer)",
"statusUpscaling": "Upscaling",
"statusUpscalingESRGAN": "Upscaling (ESRGAN)",
"statusLoadingModel": "Loading Model",
"statusModelChanged": "Model Changed"
}

58 frontend/dist/locales/common/es.json (vendored)
@@ -1,58 +0,0 @@
{
"hotkeysLabel": "Atajos de teclado",
"themeLabel": "Tema",
"languagePickerLabel": "Selector de idioma",
"reportBugLabel": "Reportar errores",
"githubLabel": "GitHub",
"discordLabel": "Discord",
"settingsLabel": "Ajustes",
"darkTheme": "Oscuro",
"lightTheme": "Claro",
"greenTheme": "Verde",
"langEnglish": "Inglés",
"langRussian": "Ruso",
"langItalian": "Italiano",
"langBrPortuguese": "Portugués (Brasil)",
"langGerman": "Alemán",
"langPortuguese": "Portugués",
"langFrench": "French",
"langPolish": "Polish",
"langSpanish": "Español",
"text2img": "Texto a Imagen",
"img2img": "Imagen a Imagen",
"unifiedCanvas": "Lienzo Unificado",
"nodes": "Nodos",
"nodesDesc": "Un sistema de generación de imágenes basado en nodos, actualmente se encuentra en desarrollo. Mantente pendiente a nuestras actualizaciones acerca de esta fabulosa funcionalidad.",
"postProcessing": "Post-procesamiento",
"postProcessDesc1": "Invoke AI ofrece una gran variedad de funciones de post-procesamiento, El aumento de tamaño y Restauración de Rostros ya se encuentran disponibles en la interfaz web, puedes acceder desde el menú de Opciones Avanzadas en las pestañas de Texto a Imagen y de Imagen a Imagen. También puedes acceder a estas funciones directamente mediante el botón de acciones en el menú superior de la imagen actual o en el visualizador",
"postProcessDesc2": "Una interfaz de usuario dedicada se lanzará pronto para facilitar flujos de trabajo de postprocesamiento más avanzado.",
"postProcessDesc3": "La Interfaz de Línea de Comandos de Invoke AI ofrece muchas otras características, incluyendo -Embiggen-.",
"training": "Entrenamiento",
"trainingDesc1": "Un flujo de trabajo dedicado para el entrenamiento de sus propios -embeddings- y puntos de control utilizando Inversión Textual y Dreambooth desde la interfaz web.",
"trainingDesc2": "InvokeAI already supports training custom embeddings using Textual Inversion using the main script.",
"trainingDesc2": "InvokeAI ya soporta el entrenamiento de -embeddings- personalizados utilizando la Inversión Textual mediante el script principal.",
"upload": "Subir imagen",
"close": "Cerrar",
"load": "Cargar",
"statusConnected": "Conectado",
"statusDisconnected": "Desconectado",
"statusError": "Error",
"statusPreparing": "Preparando",
"statusProcessingCanceled": "Procesamiento Cancelado",
"statusProcessingComplete": "Procesamiento Completo",
"statusGenerating": "Generando",
"statusGeneratingTextToImage": "Generando Texto a Imagen",
"statusGeneratingImageToImage": "Generando Imagen a Imagen",
"statusGeneratingInpainting": "Generando pintura interior",
"statusGeneratingOutpainting": "Generando pintura exterior",
"statusGenerationComplete": "Generación Completa",
"statusIterationComplete": "Iteración Completa",
"statusSavingImage": "Guardando Imagen",
"statusRestoringFaces": "Restaurando Rostros",
"statusRestoringFacesGFPGAN": "Restaurando Rostros (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restaurando Rostros (CodeFormer)",
"statusUpscaling": "Aumentando Tamaño",
"statusUpscalingESRGAN": "Restaurando Rostros(ESRGAN)",
"statusLoadingModel": "Cargando Modelo",
"statusModelChanged": "Modelo cambiado"
}

1 frontend/dist/locales/common/fr.json (vendored)
@@ -1 +0,0 @@
{}

58 frontend/dist/locales/common/it.json (vendored)
@@ -1,58 +0,0 @@
{
"hotkeysLabel": "Tasti di scelta rapida",
"themeLabel": "Tema",
"languagePickerLabel": "Seleziona lingua",
"reportBugLabel": "Segnala un errore",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Impostazioni",
"darkTheme": "Scuro",
"lightTheme": "Chiaro",
"greenTheme": "Verde",
"langEnglish": "Inglese",
"langRussian": "Russo",
"langItalian": "Italiano",
"langBrPortuguese": "Portoghese (Brasiliano)",
"langGerman": "Tedesco",
"langPortuguese": "Portoghese",
"langFrench": "Francese",
"langPolish": "Polacco",
"langSimplifiedChinese": "Cinese semplificato",
"langSpanish": "Spagnolo",
"text2img": "Testo a Immagine",
"img2img": "Immagine a Immagine",
"unifiedCanvas": "Tela unificata",
"nodes": "Nodi",
"nodesDesc": "Attualmente è in fase di sviluppo un sistema basato su nodi per la generazione di immagini. Resta sintonizzato per gli aggiornamenti su questa fantastica funzionalità.",
"postProcessing": "Post-elaborazione",
"postProcessDesc1": "Invoke AI offre un'ampia varietà di funzionalità di post-elaborazione. Ampiamento Immagine e Restaura i Volti sono già disponibili nell'interfaccia Web. È possibile accedervi dal menu 'Opzioni avanzate' delle schede 'Testo a Immagine' e 'Immagine a Immagine'. È inoltre possibile elaborare le immagini direttamente, utilizzando i pulsanti di azione dell'immagine sopra la visualizzazione dell'immagine corrente o nel visualizzatore.",
"postProcessDesc2": "Presto verrà rilasciata un'interfaccia utente dedicata per facilitare flussi di lavoro di post-elaborazione più avanzati.",
"postProcessDesc3": "L'interfaccia da riga di comando di 'Invoke AI' offre varie altre funzionalità tra cui Embiggen.",
"training": "Addestramento",
"trainingDesc1": "Un flusso di lavoro dedicato per addestrare i tuoi incorporamenti e checkpoint utilizzando Inversione Testuale e Dreambooth dall'interfaccia web.",
"trainingDesc2": "InvokeAI supporta già l'addestramento di incorporamenti personalizzati utilizzando l'inversione testuale utilizzando lo script principale.",
"upload": "Caricamento",
"close": "Chiudi",
"load": "Carica",
"statusConnected": "Collegato",
"statusDisconnected": "Disconnesso",
"statusError": "Errore",
"statusPreparing": "Preparazione",
"statusProcessingCanceled": "Elaborazione annullata",
"statusProcessingComplete": "Elaborazione completata",
"statusGenerating": "Generazione in corso",
"statusGeneratingTextToImage": "Generazione da Testo a Immagine",
"statusGeneratingImageToImage": "Generazione da Immagine a Immagine",
"statusGeneratingInpainting": "Generazione Inpainting",
"statusGeneratingOutpainting": "Generazione Outpainting",
"statusGenerationComplete": "Generazione completata",
"statusIterationComplete": "Iterazione completata",
"statusSavingImage": "Salvataggio dell'immagine",
"statusRestoringFaces": "Restaura i volti",
"statusRestoringFacesGFPGAN": "Restaura volti (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restaura volti (CodeFormer)",
"statusUpscaling": "Ampliamento",
"statusUpscalingESRGAN": "Ampliamento (ESRGAN)",
"statusLoadingModel": "Caricamento del modello",
"statusModelChanged": "Modello cambiato"
}

55 frontend/dist/locales/common/pl.json (vendored)
@@ -1,55 +0,0 @@
{
"hotkeysLabel": "Skróty klawiszowe",
"themeLabel": "Motyw",
"languagePickerLabel": "Wybór języka",
"reportBugLabel": "Zgłoś błąd",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Ustawienia",
"darkTheme": "Ciemny",
"lightTheme": "Jasny",
"greenTheme": "Zielony",
"langEnglish": "Angielski",
"langRussian": "Rosyjski",
"langItalian": "Włoski",
"langPortuguese": "Portugalski",
"langFrench": "Francuski",
"langPolish": "Polski",
"langSpanish": "Hiszpański",
"text2img": "Tekst na obraz",
"img2img": "Obraz na obraz",
"unifiedCanvas": "Tryb uniwersalny",
"nodes": "Węzły",
"nodesDesc": "W tym miejscu powstanie graficzny system generowania obrazów oparty na węzłach. Jest na co czekać!",
"postProcessing": "Przetwarzanie końcowe",
"postProcessDesc1": "Invoke AI oferuje wiele opcji przetwarzania końcowego. Z poziomu przeglądarki dostępne jest już zwiększanie rozdzielczości oraz poprawianie twarzy. Znajdziesz je wśród ustawień w trybach \"Tekst na obraz\" oraz \"Obraz na obraz\". Są również obecne w pasku menu wyświetlanym nad podglądem wygenerowanego obrazu.",
"postProcessDesc2": "Niedługo zostanie udostępniony specjalny interfejs, który będzie oferował jeszcze więcej możliwości.",
"postProcessDesc3": "Z poziomu linii poleceń już teraz dostępne są inne opcje, takie jak skalowanie obrazu metodą Embiggen.",
"training": "Trenowanie",
"trainingDesc1": "W tym miejscu dostępny będzie system przeznaczony do tworzenia własnych zanurzeń (ang. embeddings) i punktów kontrolnych przy użyciu metod w rodzaju inwersji tekstowej lub Dreambooth.",
"trainingDesc2": "Obecnie jest możliwe tworzenie własnych zanurzeń przy użyciu skryptów wywoływanych z linii poleceń.",
"upload": "Prześlij",
"close": "Zamknij",
"load": "Załaduj",
"statusConnected": "Połączono z serwerem",
"statusDisconnected": "Odłączono od serwera",
"statusError": "Błąd",
"statusPreparing": "Przygotowywanie",
"statusProcessingCanceled": "Anulowano przetwarzanie",
"statusProcessingComplete": "Zakończono przetwarzanie",
"statusGenerating": "Przetwarzanie",
"statusGeneratingTextToImage": "Przetwarzanie tekstu na obraz",
"statusGeneratingImageToImage": "Przetwarzanie obrazu na obraz",
"statusGeneratingInpainting": "Przemalowywanie",
"statusGeneratingOutpainting": "Domalowywanie",
"statusGenerationComplete": "Zakończono generowanie",
"statusIterationComplete": "Zakończono iterację",
"statusSavingImage": "Zapisywanie obrazu",
"statusRestoringFaces": "Poprawianie twarzy",
"statusRestoringFacesGFPGAN": "Poprawianie twarzy (GFPGAN)",
"statusRestoringFacesCodeFormer": "Poprawianie twarzy (CodeFormer)",
"statusUpscaling": "Powiększanie obrazu",
"statusUpscalingESRGAN": "Powiększanie (ESRGAN)",
"statusLoadingModel": "Wczytywanie modelu",
"statusModelChanged": "Zmieniono model"
}

1 frontend/dist/locales/common/pt.json (vendored)
@@ -1 +0,0 @@
{}

55 frontend/dist/locales/common/pt_br.json (vendored)
@@ -1,55 +0,0 @@
{
"hotkeysLabel": "Teclas de atalho",
"themeLabel": "Tema",
"languagePickerLabel": "Seletor de Idioma",
"reportBugLabel": "Relatar Bug",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Configurações",
"darkTheme": "Noite",
"lightTheme": "Dia",
"greenTheme": "Verde",
"langEnglish": "English",
"langRussian": "Russian",
"langItalian": "Italian",
"langBrPortuguese": "Português do Brasil",
"langPortuguese": "Portuguese",
"langFrench": "French",
"langSpanish": "Spanish",
"text2img": "Texto Para Imagem",
"img2img": "Imagem Para Imagem",
"unifiedCanvas": "Tela Unificada",
"nodes": "Nódulos",
"nodesDesc": "Um sistema baseado em nódulos para geração de imagens está em contrução. Fique ligado para atualizações sobre essa funcionalidade incrível.",
"postProcessing": "Pós-processamento",
"postProcessDesc1": "Invoke AI oferece uma variedade e funcionalidades de pós-processamento. Redimensionador de Imagem e Restauração Facial já estão disponíveis na interface. Você pode acessar elas no menu de Opções Avançadas na aba de Texto para Imagem e Imagem para Imagem. Você também pode processar imagens diretamente, usando os botões de ação de imagem acima da atual tela de imagens ou visualizador.",
"postProcessDesc2": "Uma interface dedicada será lançada em breve para facilitar fluxos de trabalho com opções mais avançadas de pós-processamento.",
"postProcessDesc3": "A interface do comando de linha da Invoke oferece várias funcionalidades incluindo Ampliação.",
"training": "Treinando",
"trainingDesc1": "Um fluxo de trabalho dedicado para treinar suas próprias incorporações e chockpoints usando Inversão Textual e Dreambooth na interface web.",
"trainingDesc2": "InvokeAI já suporta treinar incorporações personalizadas usando Inversão Textual com o script principal.",
"upload": "Enviar",
"close": "Fechar",
"load": "Carregar",
"statusConnected": "Conectado",
"statusDisconnected": "Disconectado",
"statusError": "Erro",
"statusPreparing": "Preparando",
"statusProcessingCanceled": "Processamento Canceledo",
"statusProcessingComplete": "Processamento Completo",
"statusGenerating": "Gerando",
"statusGeneratingTextToImage": "Gerando Texto Para Imagem",
"statusGeneratingImageToImage": "Gerando Imagem Para Imagem",
"statusGeneratingInpainting": "Gerando Inpainting",
"statusGeneratingOutpainting": "Gerando Outpainting",
"statusGenerationComplete": "Geração Completa",
"statusIterationComplete": "Iteração Completa",
"statusSavingImage": "Salvando Imagem",
"statusRestoringFaces": "Restaurando Rostos",
"statusRestoringFacesGFPGAN": "Restaurando Rostos (GFPGAN)",
"statusRestoringFacesCodeFormer": "Restaurando Rostos (CodeFormer)",
"statusUpscaling": "Redimensinando",
"statusUpscalingESRGAN": "Redimensinando (ESRGAN)",
"statusLoadingModel": "Carregando Modelo",
"statusModelChanged": "Modelo Alterado"
}

54 frontend/dist/locales/common/ru.json (vendored)
@@ -1,54 +0,0 @@
{
"hotkeysLabel": "Горячие клавиши",
"themeLabel": "Тема",
"languagePickerLabel": "Язык",
"reportBugLabel": "Сообщить об ошибке",
"githubLabel": "Github",
"discordLabel": "Discord",
"settingsLabel": "Настройка",
"darkTheme": "Темная",
"lightTheme": "Светлая",
"greenTheme": "Зеленая",
"langEnglish": "English",
"langRussian": "Русский",
"langItalian": "Italian",
"langPortuguese": "Portuguese",
"langFrench": "French",
"langSpanish": "Spanish",
"text2img": "Изображение из текста (text2img)",
"img2img": "Изображение в изображение (img2img)",
"unifiedCanvas": "Универсальный холст",
"nodes": "Ноды",
"nodesDesc": "Cистема генерации изображений на основе нодов (узлов) уже разрабатывается. Следите за новостями об этой замечательной функции.",
"postProcessing": "Постобработка",
"postProcessDesc1": "Invoke AI предлагает широкий спектр функций постобработки. Увеличение изображения (upscale) и восстановление лиц уже доступны в интерфейсе. Получите доступ к ним из меню 'Дополнительные параметры' на вкладках 'Текст в изображение' и 'Изображение в изображение'. Обрабатывайте изображения напрямую, используя кнопки действий с изображениями над текущим изображением или в режиме просмотра.",
"postProcessDesc2": "В ближайшее время будет выпущен специальный интерфейс для более продвинутых процессов постобработки.",
"postProcessDesc3": "Интерфейс командной строки Invoke AI предлагает различные другие функции, включая увеличение Embiggen",
"training": "Обучение",
"trainingDesc1": "Специальный интерфейс для обучения собственных моделей с использованием Textual Inversion и Dreambooth",
"trainingDesc2": "InvokeAI уже поддерживает обучение моделей с помощью TI, через интерфейс командной строки.",
"upload": "Загрузить",
"close": "Закрыть",
"load": "Загрузить",
"statusConnected": "Подключен",
"statusDisconnected": "Отключен",
"statusError": "Ошибка",
"statusPreparing": "Подготовка",
"statusProcessingCanceled": "Обработка прервана",
"statusProcessingComplete": "Обработка завершена",
"statusGenerating": "Генерация",
"statusGeneratingTextToImage": "Создаем изображение из текста",
"statusGeneratingImageToImage": "Создаем изображение из изображения",
"statusGeneratingInpainting": "Дополняем внутри",
"statusGeneratingOutpainting": "Дорисовываем снаружи",
"statusGenerationComplete": "Генерация завершена",
"statusIterationComplete": "Итерация завершена",
"statusSavingImage": "Сохранение изображения",
"statusRestoringFaces": "Восстановление лиц",
"statusRestoringFacesGFPGAN": "Восстановление лиц (GFPGAN)",
"statusRestoringFacesCodeFormer": "Восстановление лиц (CodeFormer)",
"statusUpscaling": "Увеличение",
"statusUpscalingESRGAN": "Увеличение (ESRGAN)",
"statusLoadingModel": "Загрузка модели",
"statusModelChanged": "Модель изменена"
}

54 frontend/dist/locales/common/zh_cn.json (vendored)
@@ -1,54 +0,0 @@
{
"hotkeysLabel": "快捷键",
"themeLabel": "主题",
"languagePickerLabel": "语言",
"reportBugLabel": "提交错误报告",
"githubLabel": "GitHub",
"discordLabel": "Discord",
"settingsLabel": "设置",
"darkTheme": "暗色",
"lightTheme": "亮色",
"greenTheme": "绿色",
"langEnglish": "英语",
"langRussian": "俄语",
"langItalian": "意大利语",
"langPortuguese": "葡萄牙语",
"langFrench": "法语",
"langChineseSimplified": "简体中文",
"text2img": "文字到图像",
"img2img": "图像到图像",
"unifiedCanvas": "统一画布",
"nodes": "节点",
"nodesDesc": "一个基于节点的图像生成系统目前正在开发中。请持续关注关于这一功能的更新。",
"postProcessing": "后期处理",
"postProcessDesc1": "Invoke AI 提供各种各样的后期处理功能。图像放大和面部修复在网页界面中已经可用。你可以从文本到图像和图像到图像页面的高级选项菜单中访问它们。你也可以直接使用图像显示上方或查看器中的图像操作按钮处理图像。",
"postProcessDesc2": "一个专门的界面将很快发布,新的界面能够处理更复杂的后期处理流程。",
"postProcessDesc3": "Invoke AI 命令行界面提供例如Embiggen的各种其他功能。",
"training": "训练",
"trainingDesc1": "一个专门用于从网络UI使用Textual Inversion和Dreambooth训练自己的嵌入模型和检查点的工作流程。",
"trainingDesc2": "InvokeAI已经支持使用主脚本中的Textual Inversion来训练自定义的嵌入模型。",
"upload": "上传",
"close": "关闭",
"load": "加载",
"statusConnected": "已连接",
"statusDisconnected": "未连接",
"statusError": "错误",
"statusPreparing": "准备中",
"statusProcessingCanceled": "处理取消",
"statusProcessingComplete": "处理完成",
"statusGenerating": "生成中",
"statusGeneratingTextToImage": "文字到图像生成中",
"statusGeneratingImageToImage": "图像到图像生成中",
"statusGeneratingInpainting": "生成内画中",
"statusGeneratingOutpainting": "生成外画中",
"statusGenerationComplete": "生成完成",
"statusIterationComplete": "迭代完成",
"statusSavingImage": "图像保存中",
"statusRestoringFaces": "脸部修复中",
"statusRestoringFacesGFPGAN": "脸部修复中 (GFPGAN)",
"statusRestoringFacesCodeFormer": "脸部修复中 (CodeFormer)",
"statusUpscaling": "放大中",
"statusUpscalingESRGAN": "放大中 (ESRGAN)",
"statusLoadingModel": "模型加载中",
"statusModelChanged": "模型已切换"
}

16 frontend/dist/locales/gallery/de.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Erzeugungen",
"showGenerations": "Zeige Erzeugnisse",
"uploads": "Uploads",
"showUploads": "Zeige Uploads",
"galleryImageSize": "Bildgröße",
"galleryImageResetSize": "Größe zurücksetzen",
"gallerySettings": "Galerie-Einstellungen",
"maintainAspectRatio": "Seitenverhältnis beibehalten",
"autoSwitchNewImages": "Automatisch zu neuen Bildern wechseln",
"singleColumnLayout": "Einspaltiges Layout",
"pinGallery": "Galerie anpinnen",
"allImagesLoaded": "Alle Bilder geladen",
"loadMore": "Mehr laden",
"noImagesInGallery": "Keine Bilder in der Galerie"
}

16 frontend/dist/locales/gallery/en-US.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Generations",
"showGenerations": "Show Generations",
"uploads": "Uploads",
"showUploads": "Show Uploads",
"galleryImageSize": "Image Size",
"galleryImageResetSize": "Reset Size",
"gallerySettings": "Gallery Settings",
"maintainAspectRatio": "Maintain Aspect Ratio",
"autoSwitchNewImages": "Auto-Switch to New Images",
"singleColumnLayout": "Single Column Layout",
"pinGallery": "Pin Gallery",
"allImagesLoaded": "All Images Loaded",
"loadMore": "Load More",
"noImagesInGallery": "No Images In Gallery"
}

16 frontend/dist/locales/gallery/en.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Generations",
"showGenerations": "Show Generations",
"uploads": "Uploads",
"showUploads": "Show Uploads",
"galleryImageSize": "Image Size",
"galleryImageResetSize": "Reset Size",
"gallerySettings": "Gallery Settings",
"maintainAspectRatio": "Maintain Aspect Ratio",
"autoSwitchNewImages": "Auto-Switch to New Images",
"singleColumnLayout": "Single Column Layout",
"pinGallery": "Pin Gallery",
"allImagesLoaded": "All Images Loaded",
"loadMore": "Load More",
"noImagesInGallery": "No Images In Gallery"
}

16 frontend/dist/locales/gallery/es.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Generaciones",
"showGenerations": "Mostrar Generaciones",
"uploads": "Subidas de archivos",
"showUploads": "Mostar Subidas",
"galleryImageSize": "Tamaño de la imagen",
"galleryImageResetSize": "Restablecer tamaño de la imagen",
"gallerySettings": "Ajustes de la galería",
"maintainAspectRatio": "Mantener relación de aspecto",
"autoSwitchNewImages": "Auto seleccionar Imágenes nuevas",
"singleColumnLayout": "Diseño de una columna",
"pinGallery": "Fijar galería",
"allImagesLoaded": "Todas las imágenes cargadas",
"loadMore": "Cargar más",
"noImagesInGallery": "Sin imágenes en la galería"
}

1 frontend/dist/locales/gallery/fr.json (vendored)
@@ -1 +0,0 @@
{}

16 frontend/dist/locales/gallery/it.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Generazioni",
"showGenerations": "Mostra Generazioni",
"uploads": "Caricamenti",
"showUploads": "Mostra caricamenti",
"galleryImageSize": "Dimensione dell'immagine",
"galleryImageResetSize": "Ripristina dimensioni",
"gallerySettings": "Impostazioni della galleria",
"maintainAspectRatio": "Mantenere le proporzioni",
"autoSwitchNewImages": "Passaggio automatico a nuove immagini",
"singleColumnLayout": "Layout a colonna singola",
"pinGallery": "Blocca la galleria",
"allImagesLoaded": "Tutte le immagini caricate",
"loadMore": "Carica di più",
"noImagesInGallery": "Nessuna immagine nella galleria"
}

16 frontend/dist/locales/gallery/pl.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Wygenerowane",
"showGenerations": "Pokaż wygenerowane obrazy",
"uploads": "Przesłane",
"showUploads": "Pokaż przesłane obrazy",
"galleryImageSize": "Rozmiar obrazów",
"galleryImageResetSize": "Resetuj rozmiar",
"gallerySettings": "Ustawienia galerii",
"maintainAspectRatio": "Zachowaj proporcje",
"autoSwitchNewImages": "Przełączaj na nowe obrazy",
"singleColumnLayout": "Układ jednokolumnowy",
"pinGallery": "Przypnij galerię",
"allImagesLoaded": "Koniec listy",
"loadMore": "Wczytaj więcej",
"noImagesInGallery": "Brak obrazów w galerii"
}

1 frontend/dist/locales/gallery/pt.json (vendored)
@@ -1 +0,0 @@
{}

16 frontend/dist/locales/gallery/pt_br.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Gerações",
"showGenerations": "Mostrar Gerações",
"uploads": "Enviados",
"showUploads": "Mostrar Enviados",
"galleryImageSize": "Tamanho da Imagem",
"galleryImageResetSize": "Resetar Imagem",
"gallerySettings": "Configurações de Galeria",
"maintainAspectRatio": "Mater Proporções",
"autoSwitchNewImages": "Trocar para Novas Imagens Automaticamente",
"singleColumnLayout": "Disposição em Coluna Única",
"pinGallery": "Fixar Galeria",
"allImagesLoaded": "Todas as Imagens Carregadas",
"loadMore": "Carregar Mais",
"noImagesInGallery": "Sem Imagens na Galeria"
}

16 frontend/dist/locales/gallery/ru.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "Генерации",
"showGenerations": "Показывать генерации",
"uploads": "Загрузки",
"showUploads": "Показывать загрузки",
"galleryImageSize": "Размер изображений",
"galleryImageResetSize": "Размер по умолчанию",
"gallerySettings": "Настройка галереи",
"maintainAspectRatio": "Сохранять пропорции",
"autoSwitchNewImages": "Автоматически выбирать новые",
"singleColumnLayout": "Одна колонка",
"pinGallery": "Закрепить галерею",
"allImagesLoaded": "Все изображения загружены",
"loadMore": "Показать больше",
"noImagesInGallery": "Изображений нет"
}

16 frontend/dist/locales/gallery/zh_cn.json (vendored)
@@ -1,16 +0,0 @@
{
"generations": "生成的图像",
"showGenerations": "显示生成的图像",
"uploads": "上传的图像",
"showUploads": "显示上传的图像",
"galleryImageSize": "预览大小",
"galleryImageResetSize": "重置预览大小",
"gallerySettings": "预览设置",
"maintainAspectRatio": "保持比例",
"autoSwitchNewImages": "自动切换到新图像",
"singleColumnLayout": "单列布局",
"pinGallery": "保持图库常开",
"allImagesLoaded": "所有图像加载完成",
"loadMore": "加载更多",
"noImagesInGallery": "图库中无图像"
}
Some files were not shown because too many files have changed in this diff.