Compare commits: invokeai-b...2.1.3-rc7 (205 commits)
SHA1 | Author | Date | |
---|---|---|---|
9a1fe8e7fb | |||
ff56f5251b | |||
ed943bd6c7 | |||
7ad2355b1d | |||
66c920fc19 | |||
3fc5cb09f8 | |||
1345ec77ab | |||
b116715490 | |||
fa3670270e | |||
c304250ef6 | |||
802ce5dde5 | |||
311ee320ec | |||
e9df17b374 | |||
061fb4ef00 | |||
52be0d2396 | |||
4095acd10e | |||
201eb22d76 | |||
17ab982200 | |||
a04965b0e9 | |||
0b529f0c57 | |||
6f9f848345 | |||
918c1589ef | |||
116415b3fc | |||
b4b6eabaac | |||
4ef1f4a854 | |||
510fc4ebaa | |||
a20914434b | |||
0d134195fd | |||
649d8c8573 | |||
a358d370a0 | |||
94a9033c4f | |||
18a947c503 | |||
a23b031895 | |||
23af68c7d7 | |||
e258beeb51 | |||
7460c069b8 | |||
e481bfac61 | |||
5040747c67 | |||
d1ab65a431 | |||
af4ee7feb8 | |||
764fb29ade | |||
1014d3ba44 | |||
40a48aca88 | |||
92abc00f16 | |||
a5719aabf8 | |||
44a18511fa | |||
b850dbadaf | |||
9ef8b944d5 | |||
efc5a98488 | |||
1417c87928 | |||
2dd6fc2b93 | |||
22213612a0 | |||
71ee44a827 | |||
b17ca0a5e7 | |||
71bbfe4a1a | |||
5702271991 | |||
10781e7dc4 | |||
099d1157c5 | |||
ab825bf7ee | |||
10cfeb5ada | |||
e97515d045 | |||
0f04bc5789 | |||
3f74aabecd | |||
b1a99a51b7 | |||
8004f8a6d9 | |||
ff8ff2212a | |||
8e5363cd83 | |||
1450779146 | |||
8cd5d95b8a | |||
abd6407394 | |||
734dacfbe9 | |||
636620b1d5 | |||
1fe41146f0 | |||
2ad6ef355a | |||
865502ee4f | |||
c7984f3299 | |||
7f150ed833 | |||
badf4e256c | |||
e64c60bbb3 | |||
1780618543 | |||
f91fd27624 | |||
09e41e8f76 | |||
6eeb2107b3 | |||
17053ad8b7 | |||
fefb4dc1f8 | |||
d05b1b3544 | |||
82d4904c07 | |||
1cdcf33cfa | |||
6616fa835a | |||
7b9a4564b1 | |||
fcdefa0620 | |||
ef8b3ce639 | |||
36870a8f53 | |||
b70420951d | |||
1f0c5b4cf1 | |||
8648da8111 | |||
45b4593563 | |||
41b04316cf | |||
e97c6db2a3 | |||
896820a349 | |||
06c8f468bf | |||
61920e2701 | |||
f34ba7ca70 | |||
c30ef0895d | |||
aa3a774f73 | |||
2c30555b84 | |||
743f605773 | |||
519c661abb | |||
22c956c75f | |||
13696adc3a | |||
0196571a12 | |||
9666f466ab | |||
240e5486c8 | |||
8164b6b9cf | |||
4fc82d554f | |||
96b34c0f85 | |||
dd5a88dcee | |||
95ed56bf82 | |||
1ae80f5ab9 | |||
1f0bd3ca6c | |||
a1971f6830 | |||
c6118e8898 | |||
7ba958cf7f | |||
383905d5d2 | |||
6173e3e9ca | |||
3feb7d8922 | |||
1d9edbd0dd | |||
d439abdb89 | |||
ee47ea0c89 | |||
300bb2e627 | |||
ccf8593501 | |||
0fda612f3f | |||
5afff65b71 | |||
7e55bdefce | |||
620cf84d3d | |||
cfe567c62a | |||
cefe12f1df | |||
1e51c39928 | |||
42a02bbb80 | |||
f1ae6dae4c | |||
6195579910 | |||
16c8b23b34 | |||
07ae626b22 | |||
8d171bb044 | |||
6e33ca7e9e | |||
db46e12f2b | |||
868e4b2db8 | |||
2e562742c1 | |||
68e6958009 | |||
ea6e3a7949 | |||
b2879ca99f | |||
4e911566c3 | |||
9bafda6a15 | |||
871a8a5375 | |||
0eef74bc00 | |||
423ae32097 | |||
8282e5d045 | |||
19305cdbdf | |||
eb9028ab30 | |||
21483f5d07 | |||
82dcbac28f | |||
d43bd4625d | |||
ea891324a2 | |||
8fd9ea2193 | |||
fb02666856 | |||
f6f5c2731b | |||
b4e3f771e0 | |||
99bb9491ac | |||
0453f21127 | |||
9fc09aa4bd | |||
5e87062cf8 | |||
3e7a459990 | |||
bbf4c03e50 | |||
611a3a9753 | |||
1611f0d181 | |||
08835115e4 | |||
2d84e28d32 | |||
ef17aae8ab | |||
0cc39f01a3 | |||
688d7258f1 | |||
4513320bf1 | |||
533fd04ef0 | |||
dff5681cf0 | |||
5a2790a69b | |||
7c5305ccba | |||
4013e8ad6f | |||
d1dfd257f9 | |||
5322d735ee | |||
cdb107dcda | |||
be1393a41c | |||
e554c2607f | |||
6215592b12 | |||
349cc25433 | |||
214d276379 | |||
ef24d76adc | |||
ab2b5a691d | |||
c7de2b2801 | |||
e8075658ac | |||
4202dabee1 | |||
d67db2bcf1 | |||
7159ec885f | |||
b5cf734ba9 | |||
f7dc8eafee | |||
762ca60a30 | |||
e7fb9f342c |
Deleted file (6 lines)
@@ -1,6 +0,0 @@
[run]
omit='.env/*'
source='.'

[report]
show_missing = true
Changed file
@@ -1,25 +1,3 @@
-# use this file as a whitelist
 *
-!invokeai
-!ldm
-!pyproject.toml
-
-# Guard against pulling in any models that might exist in the directory tree
-**/*.pt*
-**/*.ckpt
-
-# ignore frontend but whitelist dist
-invokeai/frontend/
-!invokeai/frontend/dist/
-
-# ignore invokeai/assets but whitelist invokeai/assets/web
-invokeai/assets/
-!invokeai/assets/web/
-
-# Byte-compiled / optimized / DLL files
-**/__pycache__/
-**/*.py[cod]
-
-# Distribution / packaging
-*.egg-info/
-*.egg
+!environment*.yml
+!docker-build
Deleted file (30 lines)
@@ -1,30 +0,0 @@
root = true

# All files
[*]
max_line_length = 80
charset = utf-8
end_of_line = lf
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

# Python
[*.py]
indent_size = 4
max_line_length = 120

# css
[*.css]
indent_size = 4

# flake8
[.flake8]
indent_size = 4

# Markdown MkDocs
[docs/**/*.md]
max_line_length = 80
indent_size = 4
indent_style = unset
.flake8 (deleted, 37 lines)
@@ -1,37 +0,0 @@
[flake8]
max-line-length = 120
extend-ignore =
    # See https://github.com/PyCQA/pycodestyle/issues/373
    E203,
    # use Bugbear's B950 instead
    E501,
    # from black repo https://github.com/psf/black/blob/main/.flake8
    E266, W503, B907
extend-select =
    # Bugbear line length
    B950
extend-exclude =
    scripts/orig_scripts/*
    ldm/models/*
    ldm/modules/*
    ldm/data/*
    ldm/generate.py
    ldm/util.py
    ldm/simplet2i.py
per-file-ignores =
    # B950 line too long
    # W605 invalid escape sequence
    # F841 assigned to but never used
    # F401 imported but unused
    tests/test_prompt_parser.py: B950, W605, F401
    tests/test_textual_inversion.py: F841, B950
    # B023 Function definition does not bind loop variable
    scripts/legacy_api.py: F401, B950, B023, F841
    ldm/invoke/__init__.py: F401
    # B010 Do not call setattr with a constant attribute value
    ldm/invoke/server_legacy.py: B010
# =====================
# flake-quote settings:
# =====================
# Set this to match black style:
inline-quotes = double
.github/CODEOWNERS (66 changed lines)
@@ -1,61 +1,5 @@
-# continuous integration
-/.github/workflows/ @mauwii @lstein @blessedcoolant
-
-# documentation
-/docs/ @lstein @mauwii @blessedcoolant
-mkdocs.yml @mauwii @lstein
-
-# installation and configuration
-/pyproject.toml @mauwii @lstein @ebr
-/docker/ @mauwii
-/scripts/ @ebr @lstein @blessedcoolant
-/installer/ @ebr @lstein
-ldm/invoke/config @lstein @ebr
-invokeai/assets @lstein @blessedcoolant
-invokeai/configs @lstein @ebr @blessedcoolant
-/ldm/invoke/_version.py @lstein @blessedcoolant
-
-# web ui
-/invokeai/frontend @blessedcoolant @psychedelicious
-/invokeai/backend @blessedcoolant @psychedelicious
-
-# generation and model management
-/ldm/*.py @lstein @blessedcoolant
-/ldm/generate.py @lstein @keturn
-/ldm/invoke/args.py @lstein @blessedcoolant
-/ldm/invoke/ckpt* @lstein @blessedcoolant
-/ldm/invoke/ckpt_generator @lstein @blessedcoolant
-/ldm/invoke/CLI.py @lstein @blessedcoolant
-/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
-/ldm/invoke/generator @keturn @damian0815
-/ldm/invoke/globals.py @lstein @blessedcoolant
-/ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
-/ldm/invoke/model_manager.py @lstein @blessedcoolant
-/ldm/invoke/txt2mask.py @lstein @blessedcoolant
-/ldm/invoke/patchmatch.py @Kyle0654 @lstein
-/ldm/invoke/restoration @lstein @blessedcoolant
-
-# attention, textual inversion, model configuration
-/ldm/models @damian0815 @keturn @blessedcoolant
-/ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
-/ldm/modules/attention.py @damian0815 @keturn
-/ldm/modules/diffusionmodules @damian0815 @keturn
-/ldm/modules/distributions @damian0815 @keturn
-/ldm/modules/ema.py @damian0815 @keturn
-/ldm/modules/embedding_manager.py @lstein
-/ldm/modules/encoders @damian0815 @keturn
-/ldm/modules/image_degradation @damian0815 @keturn
-/ldm/modules/losses @damian0815 @keturn
-/ldm/modules/x_transformer.py @damian0815 @keturn
-
-# Nodes
-apps/ @Kyle0654 @jpphoto
-
-# legacy REST API
-# these are dead code
-#/ldm/invoke/pngwriter.py @CapableWeb
-#/ldm/invoke/server_legacy.py @CapableWeb
-#/scripts/legacy_api.py @CapableWeb
-#/tests/legacy_tests.sh @CapableWeb
+ldm/invoke/pngwriter.py @CapableWeb
+ldm/invoke/server_legacy.py @CapableWeb
+scripts/legacy_api.py @CapableWeb
+tests/legacy_tests.sh @CapableWeb
+installer/ @tildebyte
.github/workflows/build-container.yml (113 changed lines)
@@ -1,111 +1,48 @@
+# Building the Image without pushing to confirm it is still buildable
+# confirum functionality would unfortunately need way more resources
 name: build container image
 on:
   push:
     branches:
       - 'main'
-      - 'update/ci/docker/*'
-      - 'update/docker/*'
-    paths:
-      - 'pyproject.toml'
-      - 'ldm/**'
-      - 'invokeai/backend/**'
-      - 'invokeai/configs/**'
-      - 'invokeai/frontend/dist/**'
-      - 'docker/Dockerfile'
-    tags:
-      - 'v*.*.*'
-  workflow_dispatch:
+      - 'development'
 
 jobs:
   docker:
-    if: github.event.pull_request.draft == false
     strategy:
       fail-fast: false
       matrix:
-        flavor:
-          - amd
-          - cuda
-          - cpu
+        arch:
+          - x86_64
+          - aarch64
         include:
-          - flavor: amd
-            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-          - flavor: cuda
-            pip-extra-index-url: ''
-          - flavor: cpu
-            pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
+          - arch: x86_64
+            conda-env-file: environment-lin-cuda.yml
+          - arch: aarch64
+            conda-env-file: environment-lin-aarch64.yml
     runs-on: ubuntu-latest
-    name: ${{ matrix.flavor }}
-    env:
-      PLATFORMS: 'linux/amd64,linux/arm64'
-      DOCKERFILE: 'docker/Dockerfile'
+    name: ${{ matrix.arch }}
     steps:
+      - name: prepare docker-tag
+        env:
+          repository: ${{ github.repository }}
+        run: echo "dockertag=${repository,,}" >> $GITHUB_ENV
       - name: Checkout
        uses: actions/checkout@v3
-
-      - name: Docker meta
-        id: meta
-        uses: docker/metadata-action@v4
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          images: |
-            ghcr.io/${{ github.repository }}
-            ${{ vars.DOCKERHUB_REPOSITORY }}
-          tags: |
-            type=ref,event=branch
-            type=ref,event=tag
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=semver,pattern={{major}}
-            type=sha,enable=true,prefix=sha-,format=short
-          flavor: |
-            latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
-            suffix=-${{ matrix.flavor }},onlatest=false
-
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
-
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
-        with:
-          platforms: ${{ env.PLATFORMS }}
-
-      - name: Login to GitHub Container Registry
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Login to Docker Hub
-        if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
       - name: Build container
-        id: docker_build
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v3
         with:
           context: .
-          file: ${{ env.DOCKERFILE }}
-          platforms: ${{ env.PLATFORMS }}
-          push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
-          tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
-          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
-          cache-from: |
-            type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
-            type=gha,scope=main-${{ matrix.flavor }}
-          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
-
-      - name: Docker Hub Description
-        if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
-        uses: peter-evans/dockerhub-description@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-          repository: ${{ vars.DOCKERHUB_REPOSITORY }}
-          short-description: ${{ github.event.repository.description }}
+          file: docker-build/Dockerfile
+          platforms: Linux/${{ matrix.arch }}
+          push: false
+          tags: ${{ env.dockertag }}:${{ matrix.arch }}
+          build-args: |
+            conda_env_file=${{ matrix.conda-env-file }}
+            conda_version=py39_4.12.0-Linux-${{ matrix.arch }}
+            invokeai_git=${{ github.repository }}
+            invokeai_branch=${{ github.ref_name }}
.github/workflows/clean-caches.yml (deleted, 34 lines)
@@ -1,34 +0,0 @@
name: cleanup caches by a branch
on:
  pull_request:
    types:
      - closed
  workflow_dispatch:

jobs:
  cleanup:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3

      - name: Cleanup
        run: |
          gh extension install actions/gh-actions-cache

          REPO=${{ github.repository }}
          BRANCH=${{ github.ref }}

          echo "Fetching list of cache key"
          cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH | cut -f 1 )

          ## Setting this to not fail the workflow while deleting cache keys.
          set +e
          echo "Deleting caches..."
          for cacheKey in $cacheKeysForPR
          do
            gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm
          done
          echo "Done"
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/lint-frontend.yml (deleted, 29 lines)
@@ -1,29 +0,0 @@
name: Lint frontend

on:
  pull_request:
    paths:
      - 'invokeai/frontend/**'
  push:
    paths:
      - 'invokeai/frontend/**'

defaults:
  run:
    working-directory: invokeai/frontend

jobs:
  lint-frontend:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-22.04
    steps:
      - name: Setup Node 18
        uses: actions/setup-node@v3
        with:
          node-version: '18'
      - uses: actions/checkout@v3
      - run: 'yarn install --frozen-lockfile'
      - run: 'yarn tsc'
      - run: 'yarn run madge'
      - run: 'yarn run lint --max-warnings=0'
      - run: 'yarn run prettier --check'
.github/workflows/mkdocs-material.yml (11 changed lines)
@@ -7,12 +7,7 @@ on:
 jobs:
   mkdocs-material:
-    if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
-    env:
-      REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
-      REPO_NAME: '${{ github.repository }}'
-      SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
     steps:
       - name: checkout sources
         uses: actions/checkout@v3
@@ -23,15 +18,11 @@ jobs:
         uses: actions/setup-python@v4
         with:
           python-version: '3.10'
-          cache: pip
-          cache-dependency-path: pyproject.toml
-
       - name: install requirements
-        env:
-          PIP_USE_PEP517: 1
         run: |
           python -m \
-            pip install ".[docs]"
+            pip install -r requirements-mkdocs.txt

       - name: confirm buildability
         run: |
.github/workflows/pyflakes.yml (deleted, 20 lines)
@@ -1,20 +0,0 @@
on:
  pull_request:
  push:
    branches:
      - main
      - development
      - 'release-candidate-*'

jobs:
  pyflakes:
    name: runner / pyflakes
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: pyflakes
        uses: reviewdog/action-pyflakes@v1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          reporter: github-pr-review
.github/workflows/pypi-release.yml (deleted, 41 lines)
@@ -1,41 +0,0 @@
name: PyPI Release

on:
  push:
    paths:
      - 'ldm/invoke/_version.py'
  workflow_dispatch:

jobs:
  release:
    if: github.repository == 'invoke-ai/InvokeAI'
    runs-on: ubuntu-22.04
    env:
      TWINE_USERNAME: __token__
      TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
      TWINE_NON_INTERACTIVE: 1
    steps:
      - name: checkout sources
        uses: actions/checkout@v3

      - name: install deps
        run: pip install --upgrade build twine

      - name: build package
        run: python3 -m build

      - name: check distribution
        run: twine check dist/*

      - name: check PyPI versions
        if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
        run: |
          pip install --upgrade requests
          python -c "\
          import scripts.pypi_helper; \
          EXISTS=scripts.pypi_helper.local_on_pypi(); \
          print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV

      - name: upload package
        if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
        run: twine upload dist/*
.github/workflows/test-invoke-conda.yml (new file, 126 lines)
@@ -0,0 +1,126 @@
name: Test invoke.py
on:
  push:
    branches:
      - 'main'
      - 'development'
      - 'fix-gh-actions-fork'
  pull_request:
    branches:
      - 'main'
      - 'development'

jobs:
  matrix:
    strategy:
      fail-fast: false
      matrix:
        stable-diffusion-model:
          # - 'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt'
          - 'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt'
        os:
          - ubuntu-latest
          - macOS-12
        include:
          - os: ubuntu-latest
            environment-file: environment-lin-cuda.yml
            default-shell: bash -l {0}
          - os: macOS-12
            environment-file: environment-mac.yml
            default-shell: bash -l {0}
          # - stable-diffusion-model: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
          #   stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
          #   stable-diffusion-model-switch: stable-diffusion-1.4
          - stable-diffusion-model: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-switch: stable-diffusion-1.5
    name: ${{ matrix.os }} with ${{ matrix.stable-diffusion-model-switch }}
    runs-on: ${{ matrix.os }}
    env:
      CONDA_ENV_NAME: invokeai
    defaults:
      run:
        shell: ${{ matrix.default-shell }}
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: create models.yaml from example
        run: cp configs/models.yaml.example configs/models.yaml

      - name: create environment.yml
        run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml

      - name: Use cached conda packages
        id: use-cached-conda-packages
        uses: actions/cache@v3
        with:
          path: ~/conda_pkgs_dir
          key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-file) }}

      - name: Activate Conda Env
        id: activate-conda-env
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ${{ env.CONDA_ENV_NAME }}
          environment-file: environment.yml
          miniconda-version: latest

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
        uses: actions/cache@v3
        env:
          cache-name: cache-${{ matrix.stable-diffusion-model-switch }}
        with:
          path: ${{ matrix.stable-diffusion-model-dl-path }}
          key: ${{ env.cache-name }}

      - name: Download ${{ matrix.stable-diffusion-model-switch }}
        id: download-stable-diffusion-model
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          [[ -d models/ldm/stable-diffusion-v1 ]] \
            || mkdir -p models/ldm/stable-diffusion-v1
          curl \
            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
            -o ${{ matrix.stable-diffusion-model-dl-path }} \
            -L ${{ matrix.stable-diffusion-model }}

      - name: run preload_models.py
        id: run-preload-models
        run: |
          python scripts/preload_models.py \
            --no-interactive

      - name: Run the tests
        id: run-tests
        run: |
          time python scripts/invoke.py \
            --model ${{ matrix.stable-diffusion-model-switch }} \
            --from_file ${{ env.TEST_PROMPTS }}

      - name: export conda env
        id: export-conda-env
        run: |
          mkdir -p outputs/img-samples
          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml

      - name: Archive results
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.os }}_${{ matrix.stable-diffusion-model-switch }}
          path: outputs/img-samples
.github/workflows/test-invoke-pip-skip.yml (deleted, 67 lines)
@@ -1,67 +0,0 @@
name: Test invoke.py pip
on:
  pull_request:
    paths-ignore:
      - 'pyproject.toml'
      - 'ldm/**'
      - 'invokeai/backend/**'
      - 'invokeai/configs/**'
      - 'invokeai/frontend/dist/**'
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  matrix:
    if: github.event.pull_request.draft == false
    strategy:
      matrix:
        python-version:
          # - '3.9'
          - '3.10'
        pytorch:
          # - linux-cuda-11_6
          - linux-cuda-11_7
          - linux-rocm-5_2
          - linux-cpu
          - macos-default
          - windows-cpu
          # - windows-cuda-11_6
          # - windows-cuda-11_7
        include:
          # - pytorch: linux-cuda-11_6
          #   os: ubuntu-22.04
          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
          #   github-env: $GITHUB_ENV
          - pytorch: linux-cuda-11_7
            os: ubuntu-22.04
            github-env: $GITHUB_ENV
          - pytorch: linux-rocm-5_2
            os: ubuntu-22.04
            extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
            github-env: $GITHUB_ENV
          - pytorch: linux-cpu
            os: ubuntu-22.04
            extra-index-url: 'https://download.pytorch.org/whl/cpu'
            github-env: $GITHUB_ENV
          - pytorch: macos-default
            os: macOS-12
            github-env: $GITHUB_ENV
          - pytorch: windows-cpu
            os: windows-2022
            github-env: $env:GITHUB_ENV
          # - pytorch: windows-cuda-11_6
          #   os: windows-2022
          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
          #   github-env: $env:GITHUB_ENV
          # - pytorch: windows-cuda-11_7
          #   os: windows-2022
          #   extra-index-url: 'https://download.pytorch.org/whl/cu117'
          #   github-env: $env:GITHUB_ENV
    name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    steps:
      - run: 'echo "No build required"'
.github/workflows/test-invoke-pip.yml (deleted, 148 lines)
@@ -1,148 +0,0 @@
name: Test invoke.py pip
on:
  push:
    branches:
      - 'main'
    paths:
      - 'pyproject.toml'
      - 'ldm/**'
      - 'invokeai/backend/**'
      - 'invokeai/configs/**'
      - 'invokeai/frontend/dist/**'
  pull_request:
    paths:
      - 'pyproject.toml'
      - 'ldm/**'
      - 'invokeai/backend/**'
      - 'invokeai/configs/**'
      - 'invokeai/frontend/dist/**'
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  merge_group:
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  matrix:
    if: github.event.pull_request.draft == false
    strategy:
      matrix:
        python-version:
          # - '3.9'
          - '3.10'
        pytorch:
          # - linux-cuda-11_6
          - linux-cuda-11_7
          - linux-rocm-5_2
          - linux-cpu
          - macos-default
          - windows-cpu
          # - windows-cuda-11_6
          # - windows-cuda-11_7
        include:
          # - pytorch: linux-cuda-11_6
          #   os: ubuntu-22.04
          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
          #   github-env: $GITHUB_ENV
          - pytorch: linux-cuda-11_7
            os: ubuntu-22.04
            github-env: $GITHUB_ENV
          - pytorch: linux-rocm-5_2
            os: ubuntu-22.04
            extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
            github-env: $GITHUB_ENV
          - pytorch: linux-cpu
            os: ubuntu-22.04
            extra-index-url: 'https://download.pytorch.org/whl/cpu'
            github-env: $GITHUB_ENV
          - pytorch: macos-default
            os: macOS-12
            github-env: $GITHUB_ENV
          - pytorch: windows-cpu
            os: windows-2022
            github-env: $env:GITHUB_ENV
          # - pytorch: windows-cuda-11_6
          #   os: windows-2022
          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
          #   github-env: $env:GITHUB_ENV
          # - pytorch: windows-cuda-11_7
          #   os: windows-2022
          #   extra-index-url: 'https://download.pytorch.org/whl/cu117'
          #   github-env: $env:GITHUB_ENV
    name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    env:
      PIP_USE_PEP517: '1'
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

      - name: setup python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: pip
          cache-dependency-path: pyproject.toml

      - name: install invokeai
        env:
          PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
        run: >
          pip3 install
          --editable=".[test]"

      - name: run pytest
        id: run-pytest
        run: pytest

      - name: set INVOKEAI_OUTDIR
        run: >
          python -c
          "import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
          >> ${{ matrix.github-env }}

      - name: run invokeai-configure
        id: run-preload-models
        env:
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }}
        run: >
          invokeai-configure
          --yes
          --default_only
          --full-precision
          # can't use fp16 weights without a GPU

      - name: run invokeai
        id: run-invokeai
        env:
          # Set offline mode to make sure configure preloaded successfully.
          HF_HUB_OFFLINE: 1
          HF_DATASETS_OFFLINE: 1
          TRANSFORMERS_OFFLINE: 1
        run: >
          invokeai
          --no-patchmatch
          --no-nsfw_checker
          --from_file ${{ env.TEST_PROMPTS }}
          --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}

      - name: Archive results
        id: archive-results
        uses: actions/upload-artifact@v3
        with:
          name: results
          path: ${{ env.INVOKEAI_OUTDIR }}
.gitignore (28 changed lines)
@@ -1,6 +1,4 @@
 # ignore default image save location and model symbolic link
-.idea/
-embeddings/
 outputs/
 models/ldm/stable-diffusion-v1/model.ckpt
 **/restoration/codeformer/weights
@@ -8,7 +6,6 @@ models/ldm/stable-diffusion-v1/model.ckpt
 # ignore user models config
 configs/models.user.yaml
 config/models.user.yml
-invokeai.init

 # ignore the Anaconda/Miniconda installer used while building Docker image
 anaconda.sh
@@ -68,13 +65,11 @@ htmlcov/
 .cache
 nosetests.xml
 coverage.xml
-cov.xml
 *.cover
 *.py,cover
 .hypothesis/
 .pytest_cache/
 cover/
-junit/

 # Translations
 *.mo
@@ -198,7 +193,11 @@ checkpoints
 .DS_Store

 # Let the frontend manage its own gitignore
-!invokeai/frontend/*
+!frontend/*
+frontend/apt-get
+frontend/dist
+frontend/sudo
+frontend/update

 # Scratch folder
 .scratch/
@@ -219,7 +218,7 @@ models/clipseg
 models/gfpgan

 # ignore initfile
-.invokeai
+invokeai.init

 # ignore environment.yml and requirements.txt
 # these are links to the real files in environments-and-requirements
@@ -227,11 +226,12 @@ environment.yml
 requirements.txt

 # source installer files
-installer/*zip
-installer/install.bat
-installer/install.sh
-installer/update.bat
-installer/update.sh
+source_installer/*zip
+source_installer/invokeAI
+install.bat
+install.sh
+update.bat
+update.sh

-# no longer stored in source directory
-models
+# this may be present if the user created a venv
+invokeai
Deleted file (41 lines)
@@ -1,41 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/psf/black
    rev: 23.1.0
    hooks:
      - id: black

  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort

  - repo: https://github.com/PyCQA/flake8
    rev: 6.0.0
    hooks:
      - id: flake8
        additional_dependencies:
          - flake8-black
          - flake8-bugbear
          - flake8-comprehensions
          - flake8-simplify

  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: 'v3.0.0-alpha.4'
    hooks:
      - id: prettier

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-merge-conflict
      - id: check-symlinks
      - id: check-toml
      - id: end-of-file-fixer
      - id: no-commit-to-branch
        args: ['--branch', 'main']
      - id: trailing-whitespace
Deleted file (14 lines)
@@ -1,14 +0,0 @@
invokeai/frontend/.husky
invokeai/frontend/patches

# Ignore artifacts:
build
coverage
static
invokeai/frontend/dist

# Ignore all HTML files:
*.html

# Ignore deprecated docs
docs/installation/deprecated_documentation
Changed file
@@ -1,9 +1,9 @@
-embeddedLanguageFormatting: auto
 endOfLine: lf
-singleQuote: true
-semi: true
-trailingComma: es5
+tabWidth: 2
 useTabs: false
+singleQuote: true
+quoteProps: as-needed
+embeddedLanguageFormatting: auto
 overrides:
   - files: '*.md'
     options:
@@ -11,9 +11,3 @@ overrides:
       printWidth: 80
       parser: markdown
      cursorOffset: -1
-  - files: docs/**/*.md
-    options:
-      tabWidth: 4
-  - files: 'invokeai/frontend/public/locales/*.json'
-    options:
-      tabWidth: 4
Deleted file (5 lines)
@@ -1,5 +0,0 @@
[pytest]
DJANGO_SETTINGS_MODULE = webtas.settings
; python_files = tests.py test_*.py *_tests.py

addopts = --cov=. --cov-config=.coveragerc --cov-report xml:cov.xml
Deleted file (128 lines): Contributor Covenant Code of Conduct

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at https://github.com/invoke-ai/InvokeAI/issues. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
Deleted file (84 lines): InvokeAI statement of values

<img src="docs/assets/invoke_ai_banner.png" align="center">

Invoke-AI is a community of software developers, researchers, and user interface experts who have come together on a voluntary basis to build software tools which support cutting edge AI text-to-image applications. This community is open to anyone who wishes to contribute to the effort and has the skill and time to do so.

# Our Values

The InvokeAI team is a diverse community which includes individuals from various parts of the world and many walks of life. Despite our differences, we share a number of core values which we ask prospective contributors to understand and respect. We believe:

1. That Open Source Software is a positive force in the world. We create software that can be used, reused, and redistributed, without restrictions, under a straightforward Open Source license (MIT). We believe that Open Source benefits society as a whole by increasing the availability of high quality software to all.

2. That those who create software should receive proper attribution for their creative work. While we support the exchange and reuse of Open Source Software, we feel strongly that the original authors of a piece of code should receive credit for their contribution, and we endeavor to do so whenever possible.

3. That there is moral ambiguity surrounding AI-assisted art. We are aware of the moral and ethical issues surrounding the release of the Stable Diffusion model and similar products. We are aware that, due to the composition of their training sets, current AI-generated image models are biased against certain ethnic groups, cultural concepts of beauty, ethnic stereotypes, and gender roles.

   1. We recognize the potential for harm to these groups that these biases represent and trust that future AI models will take steps towards reducing or eliminating the biases noted above, respect and give due credit to the artists whose work is sourced, and call on developers and users to favor these models over the older ones as they become available.

4. We are deeply committed to ensuring that this technology benefits everyone, including artists. We see AI art not as a replacement for the artist, but rather as a tool to empower them. With that in mind, we are constantly debating how to build systems that put artists’ needs first: tools which can be readily integrated into an artist’s existing workflows and practices, enhancing their work and helping them to push it further. Every decision we take as a team, which includes several artists, aims to build towards that goal.

5. That artificial intelligence can be a force for good in the world, but must be used responsibly. Artificial intelligence technologies have the potential to improve society, in everything from cancer care, to customer service, to creative writing.

   1. While we do not believe that software should arbitrarily limit what users can do with it, we recognize that when used irresponsibly, AI has the potential to do much harm. Our Discord server is actively moderated in order to minimize the potential of harm from user-contributed images. In addition, we ask users of our software to refrain from using it in any way that would cause mental, emotional or physical harm to individuals and vulnerable populations including (but not limited to) women; minors; ethnic minorities; religious groups; members of LGBTQIA communities; and people with disabilities or impairments.

   2. Note that some of the image generation AI models which the Invoke-AI toolkit supports carry licensing agreements which impose restrictions on how the model is used. We ask that our users read and agree to these terms if they wish to make use of these models. These agreements are distinct from the MIT license which applies to the InvokeAI software and source code.

6. That mutual respect is key to a healthy software development community. Members of the InvokeAI community are expected to treat each other with respect, beneficence, and empathy. Each of us has a different background and a unique set of skills. We strive to help each other grow and gain new skills, and we apportion expectations in a way that balances the members' time, skillset, and interest area. Disputes are resolved by open and honest communication.

## Signature

This document has been collectively crafted and approved by the current InvokeAI team members, as of 28 Nov 2022: **lstein** (Lincoln Stein), **blessedcoolant**, **hipsterusername** (Kent Keirsey), **Kyle0654** (Kyle Schouviller), **damian0815**, **mauwii** (Matthias Wild), **Netsvetaev** (Artur Netsvetaev), **psychedelicious**, **tildebyte**, **keturn**, and **ebr** (Eugene Brodsky). Although individuals within the group may hold differing views on particular details and/or their implications, we are all in agreement about its fundamental statements, as well as their significance and importance to this project moving forward.
329
README.md
@ -1,19 +1,23 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
# InvokeAI: A Stable Diffusion Toolkit
|
# InvokeAI: A Stable Diffusion Toolkit
|
||||||
|
|
||||||
|
_Formerly known as lstein/stable-diffusion_
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
[![discord badge]][discord link]
|
[![discord badge]][discord link]
|
||||||
|
|
||||||
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
|
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
|
||||||
|
|
||||||
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
|
[![CI checks on main badge]][CI checks on main link] [![CI checks on dev badge]][CI checks on dev link] [![latest commit to dev badge]][latest commit to dev link]
|
||||||
|
|
||||||
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
|
||||||
|
|
||||||
|
[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
|
||||||
|
[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
|
||||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||||
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
||||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||||
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||||
@ -24,252 +28,161 @@
|
|||||||
[github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
|
[github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
|
||||||
[github stars badge]: https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
[github stars badge]: https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
|
||||||
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
|
||||||
[latest commit to main badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/main?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||||
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
|
[latest commit to dev link]: https://github.com/invoke-ai/InvokeAI/commits/development
|
||||||
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||||
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
|
|
||||||
[translation status link]: https://hosted.weblate.org/engage/invokeai/
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.

This is a fork of
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, Mac and Linux machines, with
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.

**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>

_Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._

<div align="center">

![canvas preview](docs/assets/canvas_preview.png)

</div>

## Table of Contents

1. [Quick Start](#getting-started-with-invokeai)
2. [Installation](#detailed-installation-instructions)
3. [Hardware Requirements](#hardware-requirements)
4. [Features](#features)
5. [Latest Changes](#latest-changes)
6. [Troubleshooting](#troubleshooting)
7. [Contributing](#contributing)
8. [Contributors](#contributors)
9. [Support](#support)
10. [Further Reading](#further-reading)

## Getting Started with InvokeAI

For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

### Automatic Installer (suggested for 1st time users)

1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)

2. Download the .zip file for your OS (Windows/macOS/Linux).

3. Unzip the file.

4. If you are on Windows, double-click on the `install.bat` script. On
macOS, open a Terminal window, drag the file `install.sh` from Finder
into the Terminal, and press return. On Linux, run `install.sh`.

5. You'll be asked to confirm the location of the folder in which
to install InvokeAI and its image generation model files. Pick a
location with at least 15 GB of free disk space. More if you plan on
installing lots of models.

6. Wait while the installer does its thing. After installing the software,
the installer will launch a script that lets you configure InvokeAI and
select a set of starting image generation models.

7. Find the folder that InvokeAI was installed into (it is not the
same as the unpacked zip file directory!) The default location of this
folder (if you didn't change it in step 5) is `~/invokeai` on
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.

8. On Windows systems, double-click on the `invoke.bat` file. On
macOS, open a Terminal window, drag `invoke.sh` from the folder into
the Terminal, and press return. On Linux, run `invoke.sh`.

9. Press 2 to open the "browser-based UI", press enter/return, wait a
minute or two for Stable Diffusion to start up, then open your browser
and go to http://localhost:9090.

10. Type `banana sushi` in the box on the top left and click `Invoke`.

### Command-Line Installation (for users familiar with Terminals)

You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
not supported.

1. Open a command-line window on your machine. PowerShell is recommended for Windows.

2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:

   ```terminal
   mkdir invokeai
   ```

3. Create a virtual environment named `.venv` inside this directory and activate it:

   ```terminal
   cd invokeai
   python -m venv .venv --prompt InvokeAI
   ```

4. Activate the virtual environment (do it every time you run InvokeAI)

   _For Linux/Mac users:_

   ```sh
   source .venv/bin/activate
   ```

   _For Windows users:_

   ```ps
   .venv\Scripts\activate
   ```

5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.

   _For Windows/Linux with an NVIDIA GPU:_

   ```terminal
   pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
   ```

   _For Linux with an AMD GPU:_

   ```sh
   pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
   ```

   _For Macintoshes, either Intel or M1/M2:_

   ```sh
   pip install InvokeAI --use-pep517
   ```

6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):

   ```terminal
   invokeai-configure
   ```

7. Launch the web server (do it every time you run InvokeAI):

   ```terminal
   invokeai --web
   ```

8. Point your browser to http://localhost:9090 to bring up the web interface.

9. Type `banana sushi` in the box on the top left and click `Invoke`.

Be sure to activate the virtual environment each time before re-launching InvokeAI,
using `source .venv/bin/activate` or `.venv\Scripts\activate`.

### Detailed Installation Instructions

This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)

You can also find individual installation instructions for each platform below:

- #### [Linux](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_LINUX/)
- #### [Windows](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_WINDOWS/)
- #### [Macintosh](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_MAC/)

## Hardware Requirements

InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).

### System

You will need one of the following:

- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- An Apple computer with an M1 chip.
- An AMD-based graphics card with 4 GB or more VRAM memory. (Linux only)

We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
to render 512x512 images.

### Memory

- At least 12 GB Main Memory RAM.

### Disk

- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
**Note**

If you have a Nvidia 10xx series card (e.g. the 1080ti), please
run the dream script in full-precision mode as shown below.
Similarly, specify full-precision mode on Apple M1 hardware.

Precision is auto configured based on the device. If however you encounter
errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `invoke.py` with the `--precision=float32` flag:

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
```

### Features

#### Major Features

- [Web Server](https://invoke-ai.github.io/InvokeAI/features/WEB/)
- [Interactive Command Line Interface](https://invoke-ai.github.io/InvokeAI/features/CLI/)
- [Image To Image](https://invoke-ai.github.io/InvokeAI/features/IMG2IMG/)
- [Inpainting Support](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
- [Outpainting Support](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/)
- [Upscaling, face-restoration and outpainting](https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/)
- [Reading Prompts From File](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#reading-prompts-from-a-file)
- [Prompt Blending](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#prompt-blending)
- [Thresholding and Perlin Noise Initialization Options](https://invoke-ai.github.io/InvokeAI/features/OTHER/#thresholding-and-perlin-noise-initialization-options)
- [Negative/Unconditioned Prompts](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts)
- [Variations](https://invoke-ai.github.io/InvokeAI/features/VARIATIONS/)
- [Personalizing Text-to-Image Generation](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
- [Simplified API for text to image generation](https://invoke-ai.github.io/InvokeAI/features/OTHER/#simplified-api)

#### Other Features

- [Google Colab](https://invoke-ai.github.io/InvokeAI/features/OTHER/#google-colab)
- [Seamless Tiling](https://invoke-ai.github.io/InvokeAI/features/OTHER/#seamless-tiling)
- [Shortcut: Reusing Seeds](https://invoke-ai.github.io/InvokeAI/features/OTHER/#shortcuts-reusing-seeds)
- [Preload Models](https://invoke-ai.github.io/InvokeAI/features/OTHER/#preload-models)

## Features

Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/)

### *Web Server & UI*

InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server.

### *Unified Canvas*

The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.

### *Advanced Prompt Syntax*

InvokeAI's advanced prompt syntax allows for token weighting, cross-attention control, and prompt blending, allowing for fine-tuned tweaking of your invocations and exploration of the latent space.

### *Command Line Interface*

For users utilizing a terminal-based environment, or who want to take advantage of CLI features, InvokeAI offers an extensive and actively supported command-line interface that provides the full suite of generation functionality available in the tool.

### Other features

- *Support for both ckpt and diffusers models*
- *SD 2.0, 2.1 support*
- *Noise Control & Thresholding*
- *Popular Sampler Support*
- *Upscaling & Face Restoration Tools*
- *Embedding Manager & Support*
- *Model Manager & Support*

### Coming Soon

- *Node-Based Architecture & UI*
- And more...
### Latest Changes

For our latest changes, view our [Release
Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
[CHANGELOG](docs/CHANGELOG.md).

- v2.0.1 (13 October 2022)

  - fix noisy images at high step count when using k* samplers
  - dream.py script now calls invoke.py module directly rather than
    via a new python process (which could break the environment)

- v2.0.0 (9 October 2022)

  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
    for backward compatibility.
  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
  - img2img runs on all k* samplers
  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
  - Support for CodeFormer face reconstruction
  - Support for Textual Inversion on Macintoshes
  - Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
    and "embiggen" upscaling. See the `!fix` command.
  - New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
  - New `--perlin` and `--threshold` options allow you to add and control variation
    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>)
  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
    and tweaking of previous settings.
  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
  - Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
    New commands added:
    - List command-line history with `!history`
    - Search command-line history with `!search`
    - Clear history with `!clear`
  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
    configure. To switch away from auto use the new flag like `--precision=float32`.

For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.

## Troubleshooting

Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.
## Contributing

Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.

To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.

If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).

If you are unfamiliar with how
to contribute to GitHub projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.

A full set of contribution guidelines, along with templates, are in progress, but for now the most
important thing is to **make your pull request against the "development" branch**, and not against
"main". This will help keep public breakage to a minimum and will allow you to propose more radical
changes.

We hope you enjoy using our software as much as we enjoy creating it,
and we hope that some of those of you who are reading this will elect
to become part of our community.

Welcome to InvokeAI!

### Contributors

@@ -277,11 +190,15 @@ This fork is a combined effort of various people from across the world.

[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
their time, hard work and effort.

Thanks to [Weblate](https://weblate.org/) for generously providing translation services to this project.

### Support

For support, please use this repository's GitHub Issues tracking service, or join the Discord.

For support, please use this repository's GitHub Issues tracking service. Feel free to send me an
email if you use and like the script.

Original portions of the software are Copyright (c) 2023 by respective contributors.

Original portions of the software are Copyright (c) 2020
[Lincoln D. Stein](https://github.com/lstein)

### Further Reading

Please see the original README for more information on this software and underlying algorithm,
located in the file [README-CompViz.md](https://invoke-ai.github.io/InvokeAI/other/README-CompViz/).
|
Before Width: | Height: | Size: 651 KiB After Width: | Height: | Size: 651 KiB |
Before Width: | Height: | Size: 596 KiB After Width: | Height: | Size: 596 KiB |
Before Width: | Height: | Size: 609 KiB After Width: | Height: | Size: 609 KiB |
Before Width: | Height: | Size: 548 KiB After Width: | Height: | Size: 548 KiB |
Before Width: | Height: | Size: 705 KiB After Width: | Height: | Size: 705 KiB |
Before Width: | Height: | Size: 757 KiB After Width: | Height: | Size: 757 KiB |
Before Width: | Height: | Size: 33 KiB After Width: | Height: | Size: 33 KiB |
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
Before Width: | Height: | Size: 466 KiB After Width: | Height: | Size: 466 KiB |
Before Width: | Height: | Size: 7.4 KiB After Width: | Height: | Size: 7.4 KiB |
Before Width: | Height: | Size: 539 KiB After Width: | Height: | Size: 539 KiB |
Before Width: | Height: | Size: 7.6 KiB After Width: | Height: | Size: 7.6 KiB |
Before Width: | Height: | Size: 450 KiB After Width: | Height: | Size: 450 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 553 KiB After Width: | Height: | Size: 553 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 418 KiB After Width: | Height: | Size: 418 KiB |
Before Width: | Height: | Size: 6.1 KiB After Width: | Height: | Size: 6.1 KiB |
Before Width: | Height: | Size: 542 KiB After Width: | Height: | Size: 542 KiB |
Before Width: | Height: | Size: 9.5 KiB After Width: | Height: | Size: 9.5 KiB |
Before Width: | Height: | Size: 395 KiB After Width: | Height: | Size: 395 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 465 KiB After Width: | Height: | Size: 465 KiB |
Before Width: | Height: | Size: 7.8 KiB After Width: | Height: | Size: 7.8 KiB |
Before Width: | Height: | Size: 612 KiB After Width: | Height: | Size: 612 KiB |
Before Width: | Height: | Size: 312 KiB After Width: | Height: | Size: 312 KiB |
Before Width: | Height: | Size: 72 KiB After Width: | Height: | Size: 72 KiB |
Before Width: | Height: | Size: 319 KiB After Width: | Height: | Size: 319 KiB |
Before Width: | Height: | Size: 788 KiB After Width: | Height: | Size: 788 KiB |
Before Width: | Height: | Size: 958 KiB After Width: | Height: | Size: 958 KiB |
Before Width: | Height: | Size: 9.4 MiB After Width: | Height: | Size: 9.4 MiB |
Before Width: | Height: | Size: 610 KiB After Width: | Height: | Size: 610 KiB |
Before Width: | Height: | Size: 1.1 MiB After Width: | Height: | Size: 1.1 MiB |
Before Width: | Height: | Size: 1.3 MiB After Width: | Height: | Size: 1.3 MiB |
Before Width: | Height: | Size: 945 KiB After Width: | Height: | Size: 945 KiB |
Before Width: | Height: | Size: 972 KiB After Width: | Height: | Size: 972 KiB |
Before Width: | Height: | Size: 662 KiB After Width: | Height: | Size: 662 KiB |
Before Width: | Height: | Size: 302 KiB After Width: | Height: | Size: 302 KiB |
Before Width: | Height: | Size: 2.2 MiB After Width: | Height: | Size: 2.2 MiB |
1264  backend/invoke_ai_web_server.py  (Normal file)

69  backend/modules/parameters.py  (Normal file)

@@ -0,0 +1,69 @@

from backend.modules.parse_seed_weights import parse_seed_weights
import argparse

SAMPLER_CHOICES = [
    "ddim",
    "k_dpm_2_a",
    "k_dpm_2",
    "k_euler_a",
    "k_euler",
    "k_heun",
    "k_lms",
    "plms",
]


def parameters_to_command(params):
    """
    Converts dict of parameters into a `invoke.py` REPL command.
    """

    switches = list()

    if "prompt" in params:
        switches.append(f'"{params["prompt"]}"')
    if "steps" in params:
        switches.append(f'-s {params["steps"]}')
    if "seed" in params:
        switches.append(f'-S {params["seed"]}')
    if "width" in params:
        switches.append(f'-W {params["width"]}')
    if "height" in params:
        switches.append(f'-H {params["height"]}')
    if "cfg_scale" in params:
        switches.append(f'-C {params["cfg_scale"]}')
    if "sampler_name" in params:
        switches.append(f'-A {params["sampler_name"]}')
    if "seamless" in params and params["seamless"] == True:
        switches.append(f"--seamless")
    if "hires_fix" in params and params["hires_fix"] == True:
        switches.append(f"--hires")
    if "init_img" in params and len(params["init_img"]) > 0:
        switches.append(f'-I {params["init_img"]}')
    if "init_mask" in params and len(params["init_mask"]) > 0:
        switches.append(f'-M {params["init_mask"]}')
    if "init_color" in params and len(params["init_color"]) > 0:
        switches.append(f'--init_color {params["init_color"]}')
    if "strength" in params and "init_img" in params:
        switches.append(f'-f {params["strength"]}')
    if "fit" in params and params["fit"] == True:
        switches.append(f"--fit")
    if "facetool" in params:
        switches.append(f'-ft {params["facetool"]}')
    if "facetool_strength" in params and params["facetool_strength"]:
        switches.append(f'-G {params["facetool_strength"]}')
    elif "gfpgan_strength" in params and params["gfpgan_strength"]:
        switches.append(f'-G {params["gfpgan_strength"]}')
    if "codeformer_fidelity" in params:
        switches.append(f'-cf {params["codeformer_fidelity"]}')
    if "upscale" in params and params["upscale"]:
        switches.append(f'-U {params["upscale"][0]} {params["upscale"][1]}')
    if "variation_amount" in params and params["variation_amount"] > 0:
        switches.append(f'-v {params["variation_amount"]}')
    if "with_variations" in params:
        seed_weight_pairs = ",".join(
            f"{seed}:{weight}" for seed, weight in params["with_variations"]
        )
        switches.append(f"-V {seed_weight_pairs}")

    return " ".join(switches)
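For illustration, here is a minimal usage sketch of `parameters_to_command`. It is an editorial example rather than part of this diff, and it assumes the snippet is run from the repository root so that the `backend` package is importable; the dictionary keys mirror the parameters handled by the function above.

```python
# Editorial sketch: build an `invoke.py` REPL switch string from a dict of
# web-UI parameters. Assumes the repository root is on sys.path.
from backend.modules.parameters import parameters_to_command

params = {
    "prompt": "banana sushi",
    "steps": 30,
    "seed": 42,
    "width": 512,
    "height": 512,
    "cfg_scale": 7.5,
    "sampler_name": "k_lms",
    "seamless": False,            # falsy, so --seamless is omitted
    "upscale": [2, 0.75],         # becomes "-U 2 0.75"
    "with_variations": [(42, 0.2), (1234, 0.1)],  # becomes "-V 42:0.2,1234:0.1"
}

# Prints something like:
# "banana sushi" -s 30 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -U 2 0.75 -V 42:0.2,1234:0.1
print(parameters_to_command(params))
```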
27  configs/models.yaml.example  (Normal file)

@@ -0,0 +1,27 @@

# This file describes the alternative machine learning models
# available to InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
stable-diffusion-1.5:
  description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
  weights: ./models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  width: 512
  height: 512
  vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  default: true
stable-diffusion-1.4:
  description: Stable Diffusion inference model version 1.4
  config: configs/stable-diffusion/v1-inference.yaml
  weights: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  width: 512
  height: 512
inpainting-1.5:
  weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
  config: configs/stable-diffusion/v1-inpainting-inference.yaml
  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  description: RunwayML SD 1.5 model optimized for inpainting
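The stanzas above are plain YAML, so their layout is easy to inspect programmatically. The following short sketch is an editorial example (not InvokeAI's own model loader); it assumes PyYAML is installed and that a `configs/models.yaml` file exists in the working directory, and it simply lists each model and flags the one marked `default: true`.

```python
# Editorial sketch: list the models declared in configs/models.yaml and show
# which stanza is flagged as the default. Not InvokeAI's own loader.
import yaml

with open("configs/models.yaml") as f:
    models = yaml.safe_load(f)

for name, stanza in models.items():
    marker = " (default)" if stanza.get("default") else ""
    print(f"{name}{marker}: {stanza.get('description', 'no description')}")
    print(f"  weights: {stanza.get('weights')}")
    print(f"  config:  {stanza.get('config')}")
```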
110
configs/stable-diffusion/v1-finetune.yaml
Normal file
@ -0,0 +1,110 @@
|
|||||||
|
model:
|
||||||
|
base_learning_rate: 5.0e-03
|
||||||
|
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||||
|
params:
|
||||||
|
linear_start: 0.00085
|
||||||
|
linear_end: 0.0120
|
||||||
|
num_timesteps_cond: 1
|
||||||
|
log_every_t: 200
|
||||||
|
timesteps: 1000
|
||||||
|
first_stage_key: image
|
||||||
|
cond_stage_key: caption
|
||||||
|
image_size: 64
|
||||||
|
channels: 4
|
||||||
|
cond_stage_trainable: true # Note: different from the one we trained before
|
||||||
|
conditioning_key: crossattn
|
||||||
|
monitor: val/loss_simple_ema
|
||||||
|
scale_factor: 0.18215
|
||||||
|
use_ema: False
|
||||||
|
embedding_reg_weight: 0.0
|
||||||
|
|
||||||
|
personalization_config:
|
||||||
|
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||||
|
params:
|
||||||
|
placeholder_strings: ["*"]
|
||||||
|
initializer_words: ["sculpture"]
|
||||||
|
per_image_tokens: false
|
||||||
|
num_vectors_per_token: 1
|
||||||
|
progressive_words: False
|
||||||
|
|
||||||
|
unet_config:
|
||||||
|
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||||
|
params:
|
||||||
|
image_size: 32 # unused
|
||||||
|
in_channels: 4
|
||||||
|
out_channels: 4
|
||||||
|
model_channels: 320
|
||||||
|
attention_resolutions: [ 4, 2, 1 ]
|
||||||
|
num_res_blocks: 2
|
||||||
|
channel_mult: [ 1, 2, 4, 4 ]
|
||||||
|
num_heads: 8
|
||||||
|
use_spatial_transformer: True
|
||||||
|
transformer_depth: 1
|
||||||
|
context_dim: 768
|
||||||
|
use_checkpoint: True
|
||||||
|
legacy: False
|
||||||
|
|
||||||
|
first_stage_config:
|
||||||
|
target: ldm.models.autoencoder.AutoencoderKL
|
||||||
|
params:
|
||||||
|
embed_dim: 4
|
||||||
|
monitor: val/rec_loss
|
||||||
|
ddconfig:
|
||||||
|
double_z: true
|
||||||
|
z_channels: 4
|
||||||
|
resolution: 256
|
||||||
|
in_channels: 3
|
||||||
|
out_ch: 3
|
||||||
|
ch: 128
|
||||||
|
ch_mult:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 4
|
||||||
|
- 4
|
||||||
|
num_res_blocks: 2
|
||||||
|
attn_resolutions: []
|
||||||
|
dropout: 0.0
|
||||||
|
lossconfig:
|
||||||
|
target: torch.nn.Identity
|
||||||
|
|
||||||
|
cond_stage_config:
|
||||||
|
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
||||||
|
|
||||||
|
data:
|
||||||
|
target: main.DataModuleFromConfig
|
||||||
|
params:
|
||||||
|
batch_size: 1
|
||||||
|
num_workers: 2
|
||||||
|
wrap: false
|
||||||
|
train:
|
||||||
|
target: ldm.data.personalized.PersonalizedBase
|
||||||
|
params:
|
||||||
|
size: 512
|
||||||
|
set: train
|
||||||
|
per_image_tokens: false
|
||||||
|
repeats: 100
|
||||||
|
validation:
|
||||||
|
target: ldm.data.personalized.PersonalizedBase
|
||||||
|
params:
|
||||||
|
size: 512
|
||||||
|
set: val
|
||||||
|
per_image_tokens: false
|
||||||
|
repeats: 10
|
||||||
|
|
||||||
|
lightning:
|
||||||
|
modelcheckpoint:
|
||||||
|
params:
|
||||||
|
every_n_train_steps: 500
|
||||||
|
callbacks:
|
||||||
|
image_logger:
|
||||||
|
target: main.ImageLogger
|
||||||
|
params:
|
||||||
|
batch_frequency: 500
|
||||||
|
max_images: 8
|
||||||
|
increase_log_steps: False
|
||||||
|
|
||||||
|
trainer:
|
||||||
|
benchmark: True
|
||||||
|
max_steps: 4000000
|
||||||
|
# max_steps: 4000
|
||||||
|
|
79
configs/stable-diffusion/v1-inference.yaml
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
model:
|
||||||
|
base_learning_rate: 1.0e-04
|
||||||
|
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||||
|
params:
|
||||||
|
linear_start: 0.00085
|
||||||
|
linear_end: 0.0120
|
||||||
|
num_timesteps_cond: 1
|
||||||
|
log_every_t: 200
|
||||||
|
timesteps: 1000
|
||||||
|
first_stage_key: "jpg"
|
||||||
|
cond_stage_key: "txt"
|
||||||
|
image_size: 64
|
||||||
|
channels: 4
|
||||||
|
cond_stage_trainable: false # Note: different from the one we trained before
|
||||||
|
conditioning_key: crossattn
|
||||||
|
monitor: val/loss_simple_ema
|
||||||
|
scale_factor: 0.18215
|
||||||
|
use_ema: False
|
||||||
|
|
||||||
|
scheduler_config: # 10000 warmup steps
|
||||||
|
target: ldm.lr_scheduler.LambdaLinearScheduler
|
||||||
|
params:
|
||||||
|
warm_up_steps: [ 10000 ]
|
||||||
|
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
||||||
|
f_start: [ 1.e-6 ]
|
||||||
|
f_max: [ 1. ]
|
||||||
|
f_min: [ 1. ]
|
||||||
|
|
||||||
|
personalization_config:
|
||||||
|
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||||
|
params:
|
||||||
|
placeholder_strings: ["*"]
|
||||||
|
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||||
|
per_image_tokens: false
|
||||||
|
num_vectors_per_token: 1
|
||||||
|
progressive_words: False
|
||||||
|
|
||||||
|
unet_config:
|
||||||
|
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||||
|
params:
|
||||||
|
image_size: 32 # unused
|
||||||
|
in_channels: 4
|
||||||
|
out_channels: 4
|
||||||
|
model_channels: 320
|
||||||
|
attention_resolutions: [ 4, 2, 1 ]
|
||||||
|
num_res_blocks: 2
|
||||||
|
channel_mult: [ 1, 2, 4, 4 ]
|
||||||
|
num_heads: 8
|
||||||
|
use_spatial_transformer: True
|
||||||
|
transformer_depth: 1
|
||||||
|
context_dim: 768
|
||||||
|
use_checkpoint: True
|
||||||
|
legacy: False
|
||||||
|
|
||||||
|
first_stage_config:
|
||||||
|
target: ldm.models.autoencoder.AutoencoderKL
|
||||||
|
params:
|
||||||
|
embed_dim: 4
|
||||||
|
monitor: val/rec_loss
|
||||||
|
ddconfig:
|
||||||
|
double_z: true
|
||||||
|
z_channels: 4
|
||||||
|
resolution: 256
|
||||||
|
in_channels: 3
|
||||||
|
out_ch: 3
|
||||||
|
ch: 128
|
||||||
|
ch_mult:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 4
|
||||||
|
- 4
|
||||||
|
num_res_blocks: 2
|
||||||
|
attn_resolutions: []
|
||||||
|
dropout: 0.0
|
||||||
|
lossconfig:
|
||||||
|
target: torch.nn.Identity
|
||||||
|
|
||||||
|
cond_stage_config:
|
||||||
|
target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
|
79
configs/stable-diffusion/v1-inpainting-inference.yaml
Normal file
@ -0,0 +1,79 @@
|
|||||||
|
model:
|
||||||
|
base_learning_rate: 7.5e-05
|
||||||
|
target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
|
||||||
|
params:
|
||||||
|
linear_start: 0.00085
|
||||||
|
linear_end: 0.0120
|
||||||
|
num_timesteps_cond: 1
|
||||||
|
log_every_t: 200
|
||||||
|
timesteps: 1000
|
||||||
|
first_stage_key: "jpg"
|
||||||
|
cond_stage_key: "txt"
|
||||||
|
image_size: 64
|
||||||
|
channels: 4
|
||||||
|
cond_stage_trainable: false # Note: different from the one we trained before
|
||||||
|
conditioning_key: hybrid # important
|
||||||
|
monitor: val/loss_simple_ema
|
||||||
|
scale_factor: 0.18215
|
||||||
|
finetune_keys: null
|
||||||
|
|
||||||
|
scheduler_config: # 10000 warmup steps
|
||||||
|
target: ldm.lr_scheduler.LambdaLinearScheduler
|
||||||
|
params:
|
||||||
|
warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
|
||||||
|
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
|
||||||
|
f_start: [ 1.e-6 ]
|
||||||
|
f_max: [ 1. ]
|
||||||
|
f_min: [ 1. ]
|
||||||
|
|
||||||
|
personalization_config:
|
||||||
|
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||||
|
params:
|
||||||
|
placeholder_strings: ["*"]
|
||||||
|
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||||
|
per_image_tokens: false
|
||||||
|
num_vectors_per_token: 1
|
||||||
|
progressive_words: False
|
||||||
|
|
||||||
|
unet_config:
|
||||||
|
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||||
|
params:
|
||||||
|
image_size: 32 # unused
|
||||||
|
in_channels: 9 # 4 data + 4 downscaled image + 1 mask
|
||||||
|
out_channels: 4
|
||||||
|
model_channels: 320
|
||||||
|
attention_resolutions: [ 4, 2, 1 ]
|
||||||
|
num_res_blocks: 2
|
||||||
|
channel_mult: [ 1, 2, 4, 4 ]
|
||||||
|
num_heads: 8
|
||||||
|
use_spatial_transformer: True
|
||||||
|
transformer_depth: 1
|
||||||
|
context_dim: 768
|
||||||
|
use_checkpoint: True
|
||||||
|
legacy: False
|
||||||
|
|
||||||
|
first_stage_config:
|
||||||
|
target: ldm.models.autoencoder.AutoencoderKL
|
||||||
|
params:
|
||||||
|
embed_dim: 4
|
||||||
|
monitor: val/rec_loss
|
||||||
|
ddconfig:
|
||||||
|
double_z: true
|
||||||
|
z_channels: 4
|
||||||
|
resolution: 256
|
||||||
|
in_channels: 3
|
||||||
|
out_ch: 3
|
||||||
|
ch: 128
|
||||||
|
ch_mult:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 4
|
||||||
|
- 4
|
||||||
|
num_res_blocks: 2
|
||||||
|
attn_resolutions: []
|
||||||
|
dropout: 0.0
|
||||||
|
lossconfig:
|
||||||
|
target: torch.nn.Identity
|
||||||
|
|
||||||
|
cond_stage_config:
|
||||||
|
target: ldm.modules.encoders.modules.WeightedFrozenCLIPEmbedder
|
110
configs/stable-diffusion/v1-m1-finetune.yaml
Normal file
@ -0,0 +1,110 @@
|
|||||||
|
model:
|
||||||
|
base_learning_rate: 5.0e-03
|
||||||
|
target: ldm.models.diffusion.ddpm.LatentDiffusion
|
||||||
|
params:
|
||||||
|
linear_start: 0.00085
|
||||||
|
linear_end: 0.0120
|
||||||
|
num_timesteps_cond: 1
|
||||||
|
log_every_t: 200
|
||||||
|
timesteps: 1000
|
||||||
|
first_stage_key: image
|
||||||
|
cond_stage_key: caption
|
||||||
|
image_size: 64
|
||||||
|
channels: 4
|
||||||
|
cond_stage_trainable: true # Note: different from the one we trained before
|
||||||
|
conditioning_key: crossattn
|
||||||
|
monitor: val/loss_simple_ema
|
||||||
|
scale_factor: 0.18215
|
||||||
|
use_ema: False
|
||||||
|
embedding_reg_weight: 0.0
|
||||||
|
|
||||||
|
personalization_config:
|
||||||
|
target: ldm.modules.embedding_manager.EmbeddingManager
|
||||||
|
params:
|
||||||
|
placeholder_strings: ["*"]
|
||||||
|
initializer_words: ['face', 'man', 'photo', 'africanmale']
|
||||||
|
per_image_tokens: false
|
||||||
|
num_vectors_per_token: 6
|
||||||
|
progressive_words: False
|
||||||
|
|
||||||
|
unet_config:
|
||||||
|
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
|
||||||
|
params:
|
||||||
|
image_size: 32 # unused
|
||||||
|
in_channels: 4
|
||||||
|
out_channels: 4
|
||||||
|
model_channels: 320
|
||||||
|
attention_resolutions: [ 4, 2, 1 ]
|
||||||
|
num_res_blocks: 2
|
||||||
|
channel_mult: [ 1, 2, 4, 4 ]
|
||||||
|
num_heads: 8
|
||||||
|
use_spatial_transformer: True
|
||||||
|
transformer_depth: 1
|
||||||
|
context_dim: 768
|
||||||
|
use_checkpoint: True
|
||||||
|
legacy: False
|
||||||
|
|
||||||
|
first_stage_config:
|
||||||
|
target: ldm.models.autoencoder.AutoencoderKL
|
||||||
|
params:
|
||||||
|
embed_dim: 4
|
||||||
|
monitor: val/rec_loss
|
||||||
|
ddconfig:
|
||||||
|
double_z: true
|
||||||
|
z_channels: 4
|
||||||
|
resolution: 256
|
||||||
|
in_channels: 3
|
||||||
|
out_ch: 3
|
||||||
|
ch: 128
|
||||||
|
ch_mult:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 4
|
||||||
|
- 4
|
||||||
|
num_res_blocks: 2
|
||||||
|
attn_resolutions: []
|
||||||
|
dropout: 0.0
|
||||||
|
lossconfig:
|
||||||
|
target: torch.nn.Identity
|
||||||
|
|
||||||
|
cond_stage_config:
|
||||||
|
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
|
||||||
|
|
||||||
|
data:
|
||||||
|
target: main.DataModuleFromConfig
|
||||||
|
params:
|
||||||
|
batch_size: 1
|
||||||
|
num_workers: 2
|
||||||
|
wrap: false
|
||||||
|
train:
|
||||||
|
target: ldm.data.personalized.PersonalizedBase
|
||||||
|
params:
|
||||||
|
size: 512
|
||||||
|
set: train
|
||||||
|
per_image_tokens: false
|
||||||
|
repeats: 100
|
||||||
|
validation:
|
||||||
|
target: ldm.data.personalized.PersonalizedBase
|
||||||
|
params:
|
||||||
|
size: 512
|
||||||
|
set: val
|
||||||
|
per_image_tokens: false
|
||||||
|
repeats: 10
|
||||||
|
|
||||||
|
lightning:
|
||||||
|
modelcheckpoint:
|
||||||
|
params:
|
||||||
|
every_n_train_steps: 500
|
||||||
|
callbacks:
|
||||||
|
image_logger:
|
||||||
|
target: main.ImageLogger
|
||||||
|
params:
|
||||||
|
batch_frequency: 500
|
||||||
|
max_images: 5
|
||||||
|
increase_log_steps: False
|
||||||
|
|
||||||
|
trainer:
|
||||||
|
benchmark: False
|
||||||
|
max_steps: 6200
|
||||||
|
# max_steps: 4000
|
||||||
|
|
84
docker-build/Dockerfile
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
FROM ubuntu AS get_miniconda
|
||||||
|
|
||||||
|
SHELL ["/bin/bash", "-c"]
|
||||||
|
|
||||||
|
# install wget
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
wget \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# download and install miniconda
|
||||||
|
ARG conda_version=py39_4.12.0-Linux-x86_64
|
||||||
|
ARG conda_prefix=/opt/conda
|
||||||
|
RUN wget --progress=dot:giga -O /miniconda.sh \
|
||||||
|
https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
|
||||||
|
&& bash /miniconda.sh -b -p ${conda_prefix} \
|
||||||
|
&& rm -f /miniconda.sh
|
||||||
|
|
||||||
|
FROM ubuntu AS invokeai
|
||||||
|
|
||||||
|
# use bash
|
||||||
|
SHELL [ "/bin/bash", "-c" ]
|
||||||
|
|
||||||
|
# clean bashrc
|
||||||
|
RUN echo "" > ~/.bashrc
|
||||||
|
|
||||||
|
# Install necessary packages
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
gcc \
|
||||||
|
git \
|
||||||
|
libgl1-mesa-glx \
|
||||||
|
libglib2.0-0 \
|
||||||
|
pip \
|
||||||
|
python3 \
|
||||||
|
python3-dev \
|
||||||
|
&& apt-get clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# clone repository, create models.yaml and create symlinks
|
||||||
|
ARG invokeai_git=invoke-ai/InvokeAI
|
||||||
|
ARG invokeai_branch=main
|
||||||
|
ARG project_name=invokeai
|
||||||
|
ARG conda_env_file=environment-lin-cuda.yml
|
||||||
|
RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
|
||||||
|
&& cp \
|
||||||
|
"/${project_name}/configs/models.yaml.example" \
|
||||||
|
"/${project_name}/configs/models.yaml" \
|
||||||
|
&& ln -sf \
|
||||||
|
"/${project_name}/environments-and-requirements/${conda_env_file}" \
|
||||||
|
"/${project_name}/environment.yml" \
|
||||||
|
&& ln -sf \
|
||||||
|
/data/models/v1-5-pruned-emaonly.ckpt \
|
||||||
|
"/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
|
||||||
|
&& ln -sf \
|
||||||
|
/data/outputs/ \
|
||||||
|
"/${project_name}/outputs"
|
||||||
|
|
||||||
|
# set workdir
|
||||||
|
WORKDIR "/${project_name}"
|
||||||
|
|
||||||
|
# install conda env and preload models
|
||||||
|
ARG conda_prefix=/opt/conda
|
||||||
|
COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
|
||||||
|
RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
|
||||||
|
&& conda init bash \
|
||||||
|
&& source ~/.bashrc \
|
||||||
|
&& conda env create \
|
||||||
|
--name "${project_name}" \
|
||||||
|
&& rm -Rf ~/.cache \
|
||||||
|
&& conda clean -afy \
|
||||||
|
&& echo "conda activate ${project_name}" >> ~/.bashrc
|
||||||
|
|
||||||
|
RUN source ~/.bashrc \
|
||||||
|
&& python scripts/preload_models.py \
|
||||||
|
--no-interactive
|
||||||
|
|
||||||
|
# Copy entrypoint and set env
|
||||||
|
ENV CONDA_PREFIX="${conda_prefix}"
|
||||||
|
ENV PROJECT_NAME="${project_name}"
|
||||||
|
COPY docker-build/entrypoint.sh /
|
||||||
|
ENTRYPOINT [ "/entrypoint.sh" ]
|
84
docker-build/build.sh
Executable file
@ -0,0 +1,84 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoint!!!
|
||||||
|
# configure values by using env when executing build.sh
|
||||||
|
# e.g. env ARCH=aarch64 GITHUB_INVOKE_AI=https://github.com/yourname/yourfork.git ./build.sh
|
||||||
|
|
||||||
|
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||||
|
|
||||||
|
invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
|
||||||
|
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
|
||||||
|
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
|
||||||
|
invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
|
||||||
|
invokeai_branch=${INVOKEAI_BRANCH:-main}
|
||||||
|
huggingface_token=${HUGGINGFACE_TOKEN?}
|
||||||
|
|
||||||
|
# print the settings
|
||||||
|
echo "You are using these values:"
|
||||||
|
echo -e "project_name:\t\t ${project_name}"
|
||||||
|
echo -e "volumename:\t\t ${volumename}"
|
||||||
|
echo -e "arch:\t\t\t ${arch}"
|
||||||
|
echo -e "platform:\t\t ${platform}"
|
||||||
|
echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
|
||||||
|
echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
|
||||||
|
echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
|
||||||
|
echo -e "invokeai_git:\t\t ${invokeai_git}"
|
||||||
|
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
|
||||||
|
|
||||||
|
_runAlpine() {
|
||||||
|
docker run \
|
||||||
|
--rm \
|
||||||
|
--interactive \
|
||||||
|
--tty \
|
||||||
|
--mount source="$volumename",target=/data \
|
||||||
|
--workdir /data \
|
||||||
|
alpine "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
_copyCheckpoints() {
|
||||||
|
echo "creating subfolders for models and outputs"
|
||||||
|
_runAlpine mkdir models
|
||||||
|
_runAlpine mkdir outputs
|
||||||
|
echo "downloading v1-5-pruned-emaonly.ckpt"
|
||||||
|
_runAlpine wget \
|
||||||
|
--header="Authorization: Bearer ${huggingface_token}" \
|
||||||
|
-O models/v1-5-pruned-emaonly.ckpt \
|
||||||
|
https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
||||||
|
echo "done"
|
||||||
|
}
|
||||||
|
|
||||||
|
_checkVolumeContent() {
|
||||||
|
_runAlpine ls -lhA /data/models
|
||||||
|
}
|
||||||
|
|
||||||
|
_getModelMd5s() {
|
||||||
|
_runAlpine \
|
||||||
|
alpine sh -c "md5sum /data/models/*.ckpt"
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
|
||||||
|
echo "Volume already exists"
|
||||||
|
if [[ -z "$(_checkVolumeContent)" ]]; then
|
||||||
|
echo "looks empty, copying checkpoint"
|
||||||
|
_copyCheckpoints
|
||||||
|
fi
|
||||||
|
echo "Models in ${volumename}:"
|
||||||
|
_checkVolumeContent
|
||||||
|
else
|
||||||
|
echo -n "createing docker volume "
|
||||||
|
docker volume create "${volumename}"
|
||||||
|
_copyCheckpoints
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Build Container
|
||||||
|
docker build \
|
||||||
|
--platform="${platform}" \
|
||||||
|
--tag "${invokeai_tag}" \
|
||||||
|
--build-arg project_name="${project_name}" \
|
||||||
|
--build-arg conda_version="${invokeai_conda_version}" \
|
||||||
|
--build-arg conda_prefix="${invokeai_conda_prefix}" \
|
||||||
|
--build-arg conda_env_file="${invokeai_conda_env_file}" \
|
||||||
|
--build-arg invokeai_git="${invokeai_git}" \
|
||||||
|
--build-arg invokeai_branch="${invokeai_branch}" \
|
||||||
|
--file ./docker-build/Dockerfile \
|
||||||
|
.
|
8
docker-build/entrypoint.sh
Executable file
@ -0,0 +1,8 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
source "${CONDA_PREFIX}/etc/profile.d/conda.sh"
|
||||||
|
conda activate "${PROJECT_NAME}"
|
||||||
|
|
||||||
|
python scripts/invoke.py \
|
||||||
|
${@:---web --host=0.0.0.0}
|
13
docker-build/env.sh
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
project_name=${PROJECT_NAME:-invokeai}
|
||||||
|
volumename=${VOLUMENAME:-${project_name}_data}
|
||||||
|
arch=${ARCH:-x86_64}
|
||||||
|
platform=${PLATFORM:-Linux/${arch}}
|
||||||
|
invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}
|
||||||
|
|
||||||
|
export project_name
|
||||||
|
export volumename
|
||||||
|
export arch
|
||||||
|
export platform
|
||||||
|
export invokeai_tag
|
15
docker-build/run.sh
Executable file
@ -0,0 +1,15 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
||||||
|
|
||||||
|
docker run \
|
||||||
|
--interactive \
|
||||||
|
--tty \
|
||||||
|
--rm \
|
||||||
|
--platform "$platform" \
|
||||||
|
--name "$project_name" \
|
||||||
|
--hostname "$project_name" \
|
||||||
|
--mount source="$volumename",target=/data \
|
||||||
|
--publish 9090:9090 \
|
||||||
|
"$invokeai_tag" ${1:+$@}
|
@ -1,103 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
|
|
||||||
ARG PYTHON_VERSION=3.9
|
|
||||||
##################
|
|
||||||
## base image ##
|
|
||||||
##################
|
|
||||||
FROM python:${PYTHON_VERSION}-slim AS python-base
|
|
||||||
|
|
||||||
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
|
|
||||||
|
|
||||||
# prepare for buildkit cache
|
|
||||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
|
|
||||||
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
|
|
||||||
|
|
||||||
# Install necessary packages
|
|
||||||
RUN \
|
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libgl1-mesa-glx=20.3.* \
|
|
||||||
libglib2.0-0=2.66.* \
|
|
||||||
libopencv-dev=4.5.*
|
|
||||||
|
|
||||||
# set working directory and env
|
|
||||||
ARG APPDIR=/usr/src
|
|
||||||
ARG APPNAME=InvokeAI
|
|
||||||
WORKDIR ${APPDIR}
|
|
||||||
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
|
|
||||||
# Keeps Python from generating .pyc files in the container
|
|
||||||
ENV PYTHONDONTWRITEBYTECODE 1
|
|
||||||
# Turns off buffering for easier container logging
|
|
||||||
ENV PYTHONUNBUFFERED 1
|
|
||||||
# don't fall back to legacy build system
|
|
||||||
ENV PIP_USE_PEP517=1
|
|
||||||
|
|
||||||
#######################
|
|
||||||
## build pyproject ##
|
|
||||||
#######################
|
|
||||||
FROM python-base AS pyproject-builder
|
|
||||||
|
|
||||||
# Install dependencies
|
|
||||||
RUN \
|
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
build-essential=12.9 \
|
|
||||||
gcc=4:10.2.* \
|
|
||||||
python3-dev=3.9.*
|
|
||||||
|
|
||||||
# prepare pip for buildkit cache
|
|
||||||
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
|
|
||||||
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
|
|
||||||
RUN mkdir -p ${PIP_CACHE_DIR}
|
|
||||||
|
|
||||||
# create virtual environment
|
|
||||||
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
|
|
||||||
python3 -m venv "${APPNAME}" \
|
|
||||||
--upgrade-deps
|
|
||||||
|
|
||||||
# copy sources
|
|
||||||
COPY --link . .
|
|
||||||
|
|
||||||
# install pyproject.toml
|
|
||||||
ARG PIP_EXTRA_INDEX_URL
|
|
||||||
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
|
|
||||||
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
|
|
||||||
"${APPNAME}/bin/pip" install .
|
|
||||||
|
|
||||||
# build patchmatch
|
|
||||||
RUN python3 -c "from patchmatch import patch_match"
|
|
||||||
|
|
||||||
#####################
|
|
||||||
## runtime image ##
|
|
||||||
#####################
|
|
||||||
FROM python-base AS runtime
|
|
||||||
|
|
||||||
# Create a new user
|
|
||||||
ARG UNAME=appuser
|
|
||||||
RUN useradd \
|
|
||||||
--no-log-init \
|
|
||||||
-m \
|
|
||||||
-U \
|
|
||||||
"${UNAME}"
|
|
||||||
|
|
||||||
# create volume directory
|
|
||||||
ARG VOLUME_DIR=/data
|
|
||||||
RUN mkdir -p "${VOLUME_DIR}" \
|
|
||||||
&& chown -R "${UNAME}" "${VOLUME_DIR}"
|
|
||||||
|
|
||||||
# setup runtime environment
|
|
||||||
USER ${UNAME}
|
|
||||||
COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
|
|
||||||
ENV INVOKEAI_ROOT ${VOLUME_DIR}
|
|
||||||
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
|
|
||||||
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
|
|
||||||
EXPOSE 9090
|
|
||||||
ENTRYPOINT [ "invokeai" ]
|
|
||||||
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
|
|
||||||
VOLUME [ "${VOLUME_DIR}" ]
|
|
@ -1,51 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
|
|
||||||
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
|
|
||||||
# Possible Values are:
|
|
||||||
# - cpu
|
|
||||||
# - cuda
|
|
||||||
# - rocm
|
|
||||||
# Don't forget to also set it when executing run.sh
|
|
||||||
# if it is not set, the script will try to detect the flavor by itself.
|
|
||||||
#
|
|
||||||
# Doc can be found here:
|
|
||||||
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
|
|
||||||
|
|
||||||
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
|
|
||||||
cd "$SCRIPTDIR" || exit 1
|
|
||||||
|
|
||||||
source ./env.sh
|
|
||||||
|
|
||||||
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
|
|
||||||
|
|
||||||
# print the settings
|
|
||||||
echo -e "You are using these values:\n"
|
|
||||||
echo -e "Dockerfile:\t\t${DOCKERFILE}"
|
|
||||||
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
|
|
||||||
echo -e "Volumename:\t\t${VOLUMENAME}"
|
|
||||||
echo -e "Platform:\t\t${PLATFORM}"
|
|
||||||
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
|
|
||||||
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
|
|
||||||
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
|
|
||||||
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
|
|
||||||
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
|
|
||||||
|
|
||||||
# Create docker volume
|
|
||||||
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
|
|
||||||
echo -e "Volume already exists\n"
|
|
||||||
else
|
|
||||||
echo -n "creating docker volume "
|
|
||||||
docker volume create "${VOLUMENAME}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Build Container
|
|
||||||
DOCKER_BUILDKIT=1 docker build \
|
|
||||||
--platform="${PLATFORM:-linux/amd64}" \
|
|
||||||
--tag="${CONTAINER_IMAGE:-invokeai}" \
|
|
||||||
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
|
|
||||||
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
|
|
||||||
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
|
|
||||||
--file="${DOCKERFILE}" \
|
|
||||||
..
|
|
@ -1,51 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# This file is used to set environment variables for the build.sh and run.sh scripts.
|
|
||||||
|
|
||||||
# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
|
|
||||||
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
|
|
||||||
|
|
||||||
# Activate virtual environment if not already activated and exists
|
|
||||||
if [[ -z $VIRTUAL_ENV ]]; then
|
|
||||||
[[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
|
|
||||||
&& source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
|
|
||||||
&& echo "Activated virtual environment: $VIRTUAL_ENV"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Decide which container flavor to build if not specified
|
|
||||||
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
|
|
||||||
# Check for CUDA and ROCm
|
|
||||||
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
|
|
||||||
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
|
|
||||||
if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
|
|
||||||
CONTAINER_FLAVOR="cuda"
|
|
||||||
elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
|
|
||||||
CONTAINER_FLAVOR="rocm"
|
|
||||||
else
|
|
||||||
CONTAINER_FLAVOR="cpu"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Set PIP_EXTRA_INDEX_URL based on container flavor
|
|
||||||
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
|
|
||||||
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
|
|
||||||
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
|
|
||||||
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
|
|
||||||
# elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
|
|
||||||
# PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Variables shared by build.sh and run.sh
|
|
||||||
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
|
|
||||||
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
|
|
||||||
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
|
|
||||||
ARCH="${ARCH-$(uname -m)}"
|
|
||||||
PLATFORM="${PLATFORM-linux/${ARCH}}"
|
|
||||||
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
|
|
||||||
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
|
|
||||||
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
|
|
||||||
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
|
|
||||||
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
|
|
||||||
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
|
|
||||||
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
|
|
@@ -1,41 +0,0 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

source ./env.sh

# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs

echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
echo -e "local Models:\t${MODELSPATH:-unset}\n"

docker run \
    --interactive \
    --tty \
    --rm \
    --platform="${PLATFORM}" \
    --name="${REPOSITORY_NAME,,}" \
    --hostname="${REPOSITORY_NAME,,}" \
    --mount=source="${VOLUMENAME}",target=/data \
    --mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
    ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
    ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
    --publish=9090:9090 \
    --cap-add=sys_nice \
    ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
    "${CONTAINER_IMAGE}" ${@:+$@}

# Remove Trash folder
for f in outputs/.Trash*; do
    if [ -e "$f" ]; then
        rm -Rf "$f"
        break
    fi
done
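A hypothetical invocation of the script above (paths and token are placeholders) that relies on the optional `MODELSPATH`, `HUGGING_FACE_HUB_TOKEN`, and `GPU_FLAGS` hooks it defines; each of these is only forwarded to `docker run` when the corresponding variable is set, thanks to the `${VAR:+...}` expansions.

```
# Illustrative only: mount a local model folder, forward a HuggingFace token,
# and hand all GPUs to the container.
MODELSPATH=/srv/sd-models \
HUGGING_FACE_HUB_TOKEN=hf_xxxxxxxxxxxxxxxx \
GPU_FLAGS=all \
    ./run.sh
```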
@@ -1,5 +0,0 @@
{
    "MD046": false,
    "MD007": false,
    "MD030": false
}
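The deleted lint configuration above disables three markdownlint rules: MD046 (code block style), MD007 (unordered list indentation), and MD030 (spaces after list markers). Assuming it was saved under one of markdownlint's default config names, a lint run would have picked it up automatically, for example:

```
# Hypothetical lint invocation; markdownlint-cli discovers .markdownlint.json-style
# config files on its own, so the three rules above would be skipped.
npx markdownlint-cli "docs/**/*.md"
```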
@@ -4,377 +4,180 @@ title: Changelog

# :octicons-log-16: **Changelog**
## v2.3.0 <small>(15 January 2023)</small>

**Transition to diffusers**

Version 2.3 provides support for both the traditional `.ckpt` weight
checkpoint files as well as the HuggingFace `diffusers` format. This
introduces several changes you should know about.

1. The models.yaml format has been updated. There are now two
   different types of configuration stanza. The traditional ckpt
   one will look like this, with a `format` of `ckpt` and a
   `weights` field that points to the absolute or ROOTDIR-relative
   location of the ckpt file.

   ```
   inpainting-1.5:
     description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
     repo_id: runwayml/stable-diffusion-inpainting
     format: ckpt
     width: 512
     height: 512
     weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
     config: configs/stable-diffusion/v1-inpainting-inference.yaml
     vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
   ```

   A configuration stanza for a diffusers model hosted at HuggingFace will look like this,
   with a `format` of `diffusers` and a `repo_id` that points to the
   repository ID of the model on HuggingFace:

   ```
   stable-diffusion-2.1:
     description: Stable Diffusion version 2.1 diffusers model (5.21 GB)
     repo_id: stabilityai/stable-diffusion-2-1
     format: diffusers
   ```

   A configuration stanza for a diffusers model stored locally should
   look like this, with a `format` of `diffusers`, but a `path` field
   that points at the directory that contains `model_index.json`:

   ```
   waifu-diffusion:
     description: Latest waifu diffusion 1.4
     format: diffusers
     path: models/diffusers/hakurei-waifu-diffusion-1.4
   ```

2. In order of precedence, InvokeAI will now use HF_HOME, then
   XDG_CACHE_HOME, then finally default to `ROOTDIR/models` to
   store HuggingFace diffusers models.

   Consequently, the format of the models directory has changed to
   mimic the HuggingFace cache directory. When HF_HOME and XDG_CACHE_HOME
   are not set, diffusers models are now automatically downloaded
   and retrieved from the directory `ROOTDIR/models/diffusers`,
   while other models are stored in the directory
   `ROOTDIR/models/hub`. This organization is the same as that used
   by HuggingFace for its cache management.

   This allows you to share diffusers and ckpt model files easily with
   other machine learning applications that use the HuggingFace
   libraries. To do this, set the environment variable HF_HOME
   before starting up InvokeAI to tell it what directory to
   cache models in. To tell InvokeAI to use the standard HuggingFace
   cache directory, you would set HF_HOME like this (Linux/Mac):

   `export HF_HOME=~/.cache/huggingface`

   Both HuggingFace and InvokeAI will fall back to the XDG_CACHE_HOME
   environment variable if HF_HOME is not set; this path
   takes precedence over `ROOTDIR/models` to allow for the same sharing
   with other machine learning applications that use HuggingFace
   libraries.

3. If you upgrade to InvokeAI 2.3.* from an earlier version, there
   will be a one-time migration from the old models directory format
   to the new one. You will see a message about this the first time
   you start `invoke.py`.

4. Both the front end and back end of the model manager have been
   rewritten to accommodate diffusers. You can import models using
   their local file path, their URL, or their HuggingFace repo_id.
   On the command line, all these syntaxes work:

   ```
   !import_model stabilityai/stable-diffusion-2-1-base
   !import_model /opt/sd-models/sd-1.4.ckpt
   !import_model https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/blob/main/PaperCut_v1.ckpt
   ```

**KNOWN BUGS (15 January 2023)**

1. On CUDA systems, the 768-pixel stable-diffusion-2.0 and
   stable-diffusion-2.1 models can only be run as `diffusers` models
   when the `xformers` library is installed and configured. Without
   `xformers`, InvokeAI returns black images.

2. Inpainting and outpainting have regressed in quality.

Both these issues are being actively worked on.
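If you hit the first issue, a rough (and unofficial) workaround is sketched below; whether a prebuilt wheel is available depends on your Python, CUDA, and torch versions, so treat this as an illustration rather than a supported recipe.

```
# Illustrative only: install xformers into the same environment as InvokeAI,
# then restart invoke.py so the memory-efficient attention path is picked up.
pip install xformers
```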
## v2.2.4 <small>(11 December 2022)</small>

**The `invokeai` directory**

Previously there were two directories to worry about: the directory that
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
directory that contained the model files, embeddings, configuration and
outputs. With the 2.2.4 release, this dual system is done away with, and
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
lives in a directory named `invokeai`. By default this directory is located in
your home directory (e.g. `\Users\yourname` on Windows), but you can select
where it goes at install time.

After installation, you can delete the install directory (the one that the zip
file creates when it unpacks). Do **not** delete or move the `invokeai`
directory!

**Initialization file `invokeai/invokeai.init`**

You can place frequently-used startup options in this file, such as the default
number of steps or your preferred sampler. To keep everything in one place, this
file has now been moved into the `invokeai` directory and is named
`invokeai.init`.
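A hypothetical `invokeai.init` is sketched below; the exact switch names vary between releases, so treat it as an illustration of the idea rather than a template to copy.

```
# ~/invokeai/invokeai.init (hypothetical example; check `invoke.py --help`
# for the exact switch names in your release)
# Each line is a command-line option that is applied when invoke.py starts.
--web
--outdir=/home/yourname/invokeai/outputs
--steps=30
--sampler=k_euler_a
```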
**To update from Version 2.2.3**

The easiest route is to download and unpack one of the 2.2.4 installer files.
When it asks you for the location of the `invokeai` runtime directory, respond
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
and answer "Y" when asked if you want to reuse the directory.

The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
does not know about the new directory layout and won't be fully functional.

**To update to 2.2.5 (and beyond) there's now an update path**

As they become available, you can update to more recent versions of InvokeAI
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
Running it without any arguments will install the most recent version of
InvokeAI. Alternatively, you can install a specific release by running the
`update.sh` script with an argument in the command shell. This syntax accepts
the path to the desired release's zip file, which you can find by clicking on
the green "Code" button on this repository's home page.
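For example (the zip URL below is illustrative; copy the real one from the green "Code" button of the release you want):

```
# Run from inside the invokeai directory.
# Update to the newest release:
./update.sh

# Or pin to a specific release by passing its source zip (URL is a placeholder):
./update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.5.zip
```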
**Other 2.2.4 Improvements**

- Fix InvokeAI GUI initialization by @addianto in #1687
- fix link in documentation by @lstein in #1728
- Fix broken link by @ShawnZhong in #1736
- Remove reference to binary installer by @lstein in #1731
- documentation fixes for 2.2.3 by @lstein in #1740
- Modify installer links to point closer to the source installer by @ebr in #1745
- add documentation warning about 1650/60 cards by @lstein in #1753
- Fix Linux source URL in installation docs by @andybearman in #1756
- Make install instructions discoverable in readme by @damian0815 in #1752
- typo fix by @ofirkris in #1755
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte in #1765
- stability and usage improvements to binary & source installers by @lstein in #1760
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
- invoke script cds to its location before running by @lstein in #1805
- Make PaperCut and VoxelArt models load again by @lstein in #1730
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in #1817
- Clean up readme by @hipsterusername in #1820
- Optimized Docker build with support for external working directory by @ebr in #1544
- disable pushing the cloud container by @mauwii in #1831
- Fix docker push github action and expand with additional metadata by @ebr in #1837
- Fix Broken Link To Notebook by @VedantMadane in #1821
- Account for flat models by @spezialspezial in #1766
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by @SammCheese in #1848
- Make force free GPU memory work in img2img by @addianto in #1844
- New installer by @lstein
## v2.2.3 <small>(2 December 2022)</small>

!!! Note

    This point release removes references to the binary installer from the
    installation guide. The binary installer is not stable at the current
    time. First time users are encouraged to use the "source" installer as
    described in [Installing InvokeAI with the Source Installer](installation/deprecated_documentation/INSTALL_SOURCE.md)

With InvokeAI 2.2, this project now provides enthusiasts and professionals a
robust workflow solution for creating AI-generated and human-facilitated
compositions. Additional enhancements have been made as well, improving safety,
ease of use, and installation.

Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
512x768 image (and less for smaller images), and is compatible with
Windows/Linux/Mac (M1 & M2).

You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
introduces the main WebUI enhancement for version 2.2 -
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
potential for users to create and iterate on their creations. The following
sections describe what's new for InvokeAI.

## v2.2.2 <small>(30 November 2022)</small>

!!! note

    The binary installer is not ready for prime time. First time users are
    recommended to install via the "source" installer accessible through the
    links at the bottom of this page.

With InvokeAI 2.2, this project now provides enthusiasts and professionals a
robust workflow solution for creating AI-generated and human-facilitated
compositions. Additional enhancements have been made as well, improving safety,
ease of use, and installation.

Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
512x768 image (and less for smaller images), and is compatible with
Windows/Linux/Mac (M1 & M2).

You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
introduces the main WebUI enhancement for version 2.2 -
[The Unified Canvas](https://invoke-ai.github.io/InvokeAI/features/UNIFIED_CANVAS/).
This new workflow is the biggest enhancement added to the WebUI to date, and
unlocks a stunning amount of potential for users to create and iterate on their
creations. The following sections describe what's new for InvokeAI.

## v2.2.0 <small>(2 December 2022)</small>

With InvokeAI 2.2, this project now provides enthusiasts and professionals a
robust workflow solution for creating AI-generated and human-facilitated
compositions. Additional enhancements have been made as well, improving safety,
ease of use, and installation.

Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
512x768 image (and less for smaller images), and is compatible with
Windows/Linux/Mac (M1 & M2).

You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
introduces the main WebUI enhancement for version 2.2 -
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
potential for users to create and iterate on their creations. The following
sections describe what's new for InvokeAI.

## v2.1.3 <small>(13 November 2022)</small>

- A choice of installer scripts that automate installation and configuration.
  See [Installation](installation/index.md).
- A streamlined manual installation process that works for both Conda and
  PIP-only installs. See [Manual Installation](installation/020_INSTALL_MANUAL.md).
- The ability to save frequently-used startup options (model to load, steps,
  sampler, etc.) in a `.invokeai` file. See [Client](features/CLI.md).
- Support for AMD GPU cards (non-CUDA) on Linux machines.
- Multiple bugs and edge cases squashed.
## v2.1.0 <small>(2 November 2022)</small>

- update mac instructions to use invokeai for env name by @willwillems in #1030
- Update .gitignore by @blessedcoolant in #1040
- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
- Print out the device type which is used by @manzke in #1073
- Hires Addition by @hipsterusername in #1063
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by @skurovec in #1081
- Forward dream.py to invoke.py using the same interpreter, add deprecation warning by @db3000 in #1077
- fix noisy images at high step counts by @lstein in #1086
- Generalize facetool strength argument by @db3000 in #1078
- Enable fast switching among models at the invoke> command line by @lstein in #1066
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
- Update generate.py by @unreleased in #1109
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in #1125
- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
- Fix broken doc links, fix malaprop in the project subtitle by @majick in #1131
- Only output facetool parameters if enhancing faces by @db3000 in #1119
- Update gitignore to ignore codeformer weights at new location by @spezialspezial in #1136
- fix links to point to invoke-ai.github.io #1117 by @mauwii in #1143
- Rework-mkdocs by @mauwii in #1144
- add option to CLI and pngwriter that allows user to set PNG compression level by @lstein in #1127
- Fix img2img DDIM index out of bound by @wfng92 in #1137
- Fix gh actions by @mauwii in #1128
- Add text prompt to inpaint mask support by @lstein in #1133
- Respect http[s] protocol when making socket.io middleware by @damian0815 in #976
- WebUI: Adds Codeformer support by @psychedelicious in #1151
- Skips normalizing prompts for web UI metadata by @psychedelicious in #1165
- Add Asymmetric Tiling by @carson-katri in #1132
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in #1172
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious in #1175
- Flips channels using array slicing instead of using OpenCV by @psychedelicious in #1178
- Fix typo in docs: s/Formally/Formerly by @noodlebox in #1176
- fix clipseg loading problems by @lstein in #1177
- Correct color channels in upscale using array slicing by @wfng92 in #1181
- Web UI: Filters existing images when adding new images; Fixes #1085 by @psychedelicious in #1171
- fix a number of bugs in textual inversion by @lstein in #1190
- Improve !fetch, add !replay command by @ArDiouscuros in #882
- Fix generation of image with s>1000 by @holstvoogd in #951
- Web UI: Gallery improvements by @psychedelicious in #1198
- Update CLI.md by @krummrey in #1211
- outcropping improvements by @lstein in #1207
- add support for loading VAE autoencoders by @lstein in #1216
- remove duplicate fix_func for MPS by @wfng92 in #1210
- Metadata storage and retrieval fixes by @lstein in #1204
- nix: add shell.nix file by @Cloudef in #1170
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in #1185
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in #1187
- Allow user to generate images with initial noise as on M1 / mps system by @ArDiouscuros in #981
- feat: adding filename format template by @plucked in #968
- Web UI: Fixes broken bundle by @psychedelicious in #1242
- Support runwayML custom inpainting model by @lstein in #1243
- Update IMG2IMG.md by @talitore in #1262
- New dockerfile - including a build- and a run- script as well as a GH-Action by @mauwii in #1233
- cut over from karras to model noise schedule for higher steps by @lstein in #1222
- Prompt tweaks by @lstein in #1268
- Outpainting implementation by @Kyle0654 in #1251
- fixing aspect ratio on hires by @tjennings in #1249
- Fix-build-container-action by @mauwii in #1274
- handle all unicode characters by @damian0815 in #1276
- adds models.user.yml to .gitignore by @JakeHL in #1281
- remove debug branch, set fail-fast to false by @mauwii in #1284
- Protect-secrets-on-pr by @mauwii in #1285
- Web UI: Adds initial inpainting implementation by @psychedelicious in #1225
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in #1289
- Use proper authentication to download model by @mauwii in #1287
- Prevent indexing error for mode RGB by @spezialspezial in #1294
- Integrate sd-v1-5 model into test matrix (easily expandable), remove unnecessary caches by @mauwii in #1293
- add --no-interactive to configure_invokeai step by @mauwii in #1302
- 1-click installer and updater. Uses micromamba to install git and conda into a contained environment (if necessary) before running the normal installation script by @cmdr2 in #1253
- configure_invokeai.py script downloads the weight files by @lstein in #1290
## v2.0.1 <small>(13 October 2022)</small>
*(This compare also removes 12 binary image assets, ranging in size from 26 KiB to 601 KiB.)*