Compare commits
243 Commits
development ... 2.2.5-rc5
SHA1 | Author | Date | |
---|---|---|---|
5057beddf5 | |||
ade9bbe185 | |||
83df5c211c | |||
75f07dd22e | |||
9d103ef030 | |||
4cc60669c1 | |||
d456aea8f3 | |||
4151883cb2 | |||
060eff5dad | |||
5d00831f71 | |||
d74ed7e974 | |||
a029d90630 | |||
211d6b3831 | |||
b40faa98bd | |||
8d4ad0de4e | |||
e4b2f815e8 | |||
0dd5804949 | |||
53476af72e | |||
61ee597f4b | |||
ad0b366e47 | |||
942f029a24 | |||
e0d7c466cc | |||
16c0132a6b | |||
7cb2fcf8b4 | |||
1a65d43569 | |||
1313e31f62 | |||
aa213285bb | |||
f691353570 | |||
1c75010f29 | |||
eba8fb58ed | |||
83a7e60fe5 | |||
d4e86feeeb | |||
427614d1df | |||
ce6fb8ea29 | |||
df858eb3f9 | |||
6523fd07ab | |||
a823e37126 | |||
4eed06903c | |||
79d577bff9 | |||
3521557541 | |||
451750229d | |||
080fe48106 | |||
ff0eb56c96 | |||
006123aa32 | |||
540da32bd5 | |||
aa084b205f | |||
49f97f994a | |||
211d7be03d | |||
7d99416cc9 | |||
f60bf9e1e6 | |||
fce7b5466a | |||
e66b1a685c | |||
c351aa19eb | |||
aa1f46820f | |||
1d34405f4f | |||
f961e865f5 | |||
9eba6acb7f | |||
e32dd1d703 | |||
bbbfea488d | |||
c8a9848ad6 | |||
e88e274bf2 | |||
cca8d14c79 | |||
464aafa862 | |||
6e98b5535d | |||
ab2972f320 | |||
1ba40db361 | |||
f69fc68e06 | |||
7d8d4bcafb | |||
4fd97ceddd | |||
ded49523cd | |||
914e5fc4f8 | |||
ab4d391a3a | |||
82f59829b8 | |||
147834e99c | |||
f41da11d66 | |||
5c5454e4a5 | |||
dedbdeeafc | |||
d1770bff37 | |||
20652620d9 | |||
51613525a4 | |||
dc39f8d6a7 | |||
f1748d7017 | |||
de7abce464 | |||
2aa5bb6aad | |||
c0c4d7ca69 | |||
7d09d9da49 | |||
ffa54f4a35 | |||
69cc0993f8 | |||
1050f2726a | |||
f7170e4156 | |||
bfa8fed568 | |||
2923dfaed1 | |||
0932b4affa | |||
0b10835269 | |||
6e0f3475b4 | |||
9b9e276491 | |||
392c0725f3 | |||
2a2f38a016 | |||
7a4e647287 | |||
b8e1151a9c | |||
f39cb668fc | |||
6c015eedb3 | |||
834e56a513 | |||
652aaa809b | |||
89880e1f72 | |||
d94f955d9d | |||
64339af2dc | |||
5d20f47993 | |||
ccf8a46320 | |||
af3d72e001 | |||
1d78e1af9c | |||
1fd605604f | |||
f0b04c5066 | |||
2836976d6d | |||
474220ce8e | |||
4074705194 | |||
e89ff01caf | |||
2187d0f31c | |||
1219c39d78 | |||
bc0b0e4752 | |||
cd3da2900d | |||
4402ca10b2 | |||
1a1625406c | |||
36e6908266 | |||
7314f1a862 | |||
5c3cbd05f1 | |||
f4e7383490 | |||
96a12099ed | |||
e159bb3dce | |||
bd0c0d77d2 | |||
f745f78cb3 | |||
7efe0f3996 | |||
9f855a358a | |||
62b80a81d3 | |||
14587c9a95 | |||
fcae5defe3 | |||
e7144055d1 | |||
c857c6cc62 | |||
7ecb11cf86 | |||
e4b61923ae | |||
aa68e4e0da | |||
09365d6d2e | |||
b77f34998c | |||
0439b51a26 | |||
ef6870c714 | |||
8cbb50c204 | |||
12a8d7fc14 | |||
3d2b497eb0 | |||
786b8878d6 | |||
55132f6463 | |||
ed9186b099 | |||
d2026d0509 | |||
0bc4ed14cd | |||
06369d07c0 | |||
4e61069821 | |||
d7ba041007 | |||
3859302f1c | |||
865439114b | |||
4d76116152 | |||
42f5bd4e12 | |||
04e77f3858 | |||
1fc1eeec38 | |||
556081695a | |||
ad7917c7aa | |||
39cca8139f | |||
1d1988683b | |||
44a0055571 | |||
0cc01143d8 | |||
1c0247d58a | |||
d335f51e5f | |||
38cd968130 | |||
0111304982 | |||
c607d4fe6c | |||
6d6076d3c7 | |||
485fcc7fcb | |||
76633f500a | |||
ed6194351c | |||
f237744ab1 | |||
678cf8519e | |||
ee9de75b8d | |||
50f3847ef8 | |||
8596e3586c | |||
5ef1e0714b | |||
be871c3ab3 | |||
dec40d9b04 | |||
fe5c008dd5 | |||
72def2ae13 | |||
31cd76a2af | |||
00c78263ce | |||
5c31feb3a1 | |||
26f129cef8 | |||
292ee06751 | |||
c00d53fcce | |||
a78a8728fe | |||
6b5d19347a | |||
26671d8eed | |||
b487fa4391 | |||
12b98ba4ec | |||
fa25a64d37 | |||
29540452f2 | |||
c7960f930a | |||
c1c8b5026a | |||
5da42e0ad2 | |||
34d6f35408 | |||
401165ba35 | |||
6d8057c84f | |||
3f23dee6f4 | |||
8cdd961ad2 | |||
470b267939 | |||
bf399e303c | |||
b3d7ad7461 | |||
cd66b2c76d | |||
6b406e2b5e | |||
6737cc1443 | |||
7fd0eeb9f9 | |||
16e3b45fa2 | |||
2f07ea03a9 | |||
b563d75c58 | |||
a7b7b20d16 | |||
a47ef3ded9 | |||
7cb9b654f3 | |||
8819e12a86 | |||
967eb60ea9 | |||
b1091ecda1 | |||
2723dd9051 | |||
8f050d992e | |||
0346095876 | |||
f9bbc55f74 | |||
878a3907e9 | |||
4cfb41d9ae | |||
6ec64ecb3c | |||
540315edaa | |||
cf10a1b736 | |||
9fb2a43780 | |||
1b743f7d9b | |||
d7bf3f7d7b | |||
eba31e7caf | |||
bde456f9fa | |||
9ee83380e6 | |||
6982e6a469 | |||
0f4d71ed63 | |||
8f3f64b22e | |||
dba0280790 |
@@ -1,3 +1,19 @@
 *
-!environment*.yml
-!docker-build
+!backend
+!environments-and-requirements
+!frontend
+!ldm
+!main.py
+!scripts
+!server
+!static
+!setup.py
+
+# Guard against pulling in any models that might exist in the directory tree
+**/*.pt*
+
+# unignore configs, but only ignore the custom models.yaml, in case it exists
+!configs
+configs/models.yaml
+
+**/__pycache__
.editorconfig (new file, 12 lines)
@@ -0,0 +1,12 @@
# All files
[*]
charset = utf-8
end_of_line = lf
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

# Python
[*.py]
indent_size = 4
.github/CODEOWNERS (2 changes)
@@ -3,3 +3,5 @@ ldm/invoke/server_legacy.py @CapableWeb
 scripts/legacy_api.py @CapableWeb
 tests/legacy_tests.sh @CapableWeb
 installer/ @tildebyte
+.github/workflows/ @mauwii
+docker_build/ @mauwii
.github/workflows/build-cloud-img.yml (new file, 87 lines)
@@ -0,0 +1,87 @@
name: Build and push cloud image
on:
  workflow_dispatch:
  # push:
  #   branches:
  #     - main
  #   tags:
  #     - v*
  # # we will NOT push the image on pull requests, only test buildability.
  # pull_request:
  #   branches:
  #     - main

permissions:
  contents: read
  packages: write

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        arch:
          - x86_64
          # requires resolving a patchmatch issue
          # - aarch64
    runs-on: ubuntu-latest
    name: ${{ matrix.arch }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        if: matrix.arch == 'aarch64'

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # see https://github.com/docker/metadata-action
          # will push the following tags:
          # :edge
          # :main (+ any other branches enabled in the workflow)
          # :<tag>
          # :1.2.3 (for semver tags)
          # :1.2 (for semver tags)
          # :<sha>
          tags: |
            type=edge,branch=main
            type=ref,event=branch
            type=ref,event=tag
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
          # suffix image tags with architecture
          flavor: |
            latest=auto
            suffix=-${{ matrix.arch }},latest=true

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      # do not login to container registry on PRs
      - if: github.event_name != 'pull_request'
        name: Docker login
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push cloud image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile.cloud
          platforms: Linux/${{ matrix.arch }}
          # do not push the image on PRs
          push: false
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
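As an aside, not part of the diff: a minimal sketch of what the "Docker meta" step above would compute, assuming the repository is invoke-ai/InvokeAI and the x86_64 matrix entry. The exact names depend on docker/metadata-action's rules (it lowercases the repository and appends the flavor suffix), and the commit SHA below is a placeholder. Since the workflow currently sets push: false, these tags are computed and passed to build-push-action but nothing is pushed.

```bash
# Hypothetical contents of steps.meta.outputs.tags for a push to main:
echo "${{ steps.meta.outputs.tags }}"
# ghcr.io/invoke-ai/invokeai:main-x86_64         # from type=ref,event=branch
# ghcr.io/invoke-ai/invokeai:edge-x86_64         # from type=edge,branch=main
# ghcr.io/invoke-ai/invokeai:sha-0123abc-x86_64  # from type=sha (placeholder short SHA)
```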
.github/workflows/build-container.yml (74 changes)
@@ -1,48 +1,74 @@
-# Building the Image without pushing to confirm it is still buildable
-# confirum functionality would unfortunately need way more resources
 name: build container image
 on:
   push:
     branches:
       - 'main'
-      - 'development'
 
 jobs:
   docker:
     strategy:
       fail-fast: false
       matrix:
-        arch:
-          - x86_64
-          - aarch64
+        registry:
+          - ghcr.io
+        flavor:
+          - amd
+          - cuda
+          # - cloud
         include:
-          - arch: x86_64
-            conda-env-file: environment-lin-cuda.yml
-          - arch: aarch64
-            conda-env-file: environment-lin-aarch64.yml
+          - flavor: amd
+            pip-requirements: requirements-lin-amd.txt
+            dockerfile: docker-build/Dockerfile
+            platforms: linux/amd64,linux/arm64
+          - flavor: cuda
+            pip-requirements: requirements-lin-cuda.txt
+            dockerfile: docker-build/Dockerfile
+            platforms: linux/amd64,linux/arm64
+          # - flavor: cloud
+          #   pip-requirements: requirements-lin-cuda.txt
+          #   dockerfile: docker-build/Dockerfile.cloud
+          #   platforms: linux/amd64
     runs-on: ubuntu-latest
-    name: ${{ matrix.arch }}
+    name: ${{ matrix.flavor }}
     steps:
-      - name: prepare docker-tag
-        env:
-          repository: ${{ github.repository }}
-        run: echo "dockertag=${repository,,}" >> $GITHUB_ENV
       - name: Checkout
         uses: actions/checkout@v3
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
 
+      - name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: ${{ matrix.registry }}/${{ github.repository }}-${{ matrix.flavor }}
+          tags: |
+            type=ref,event=branch
+            type=ref,event=tag
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=sha
+          flavor: |
+            latest=true
+
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
 
+      - if: github.event_name != 'pull_request'
+        name: Docker login
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ matrix.registry }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
       - name: Build container
         uses: docker/build-push-action@v3
         with:
           context: .
-          file: docker-build/Dockerfile
-          platforms: Linux/${{ matrix.arch }}
-          push: false
-          tags: ${{ env.dockertag }}:${{ matrix.arch }}
-          build-args: |
-            conda_env_file=${{ matrix.conda-env-file }}
-            conda_version=py39_4.12.0-Linux-${{ matrix.arch }}
-            invokeai_git=${{ github.repository }}
-            invokeai_branch=${{ github.ref_name }}
+          file: ${{ matrix.dockerfile }}
+          platforms: ${{ matrix.platforms }}
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          build-args: pip_requirements=${{ matrix.pip-requirements }}
.github/workflows/lint-frontend.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
name: Lint frontend

on:
  pull_request:
    paths:
      - 'frontend/**'
  push:
    paths:
      - 'frontend/**'

defaults:
  run:
    working-directory: frontend

jobs:
  lint-frontend:
    runs-on: ubuntu-22.04
    steps:
      - name: Setup Node 18
        uses: actions/setup-node@v3
        with:
          node-version: '18'
      - uses: actions/checkout@v3
      - run: 'yarn install --frozen-lockfile'
      - run: 'yarn tsc'
      - run: 'yarn run madge'
      - run: 'yarn run lint --max-warnings=0'
      - run: 'yarn run prettier --check'
.github/workflows/mkdocs-material.yml (2 changes)
@@ -22,7 +22,7 @@ jobs:
       - name: install requirements
         run: |
           python -m \
-            pip install -r requirements-mkdocs.txt
+            pip install -r docs/requirements-mkdocs.txt
 
       - name: confirm buildability
         run: |
.github/workflows/pyflakes.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
on:
  pull_request:
  push:
    branches:
      - main
      - development
      - 'release-candidate-*'

jobs:
  pyflakes:
    name: runner / pyflakes
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: pyflakes
        uses: reviewdog/action-pyflakes@v1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          reporter: github-pr-review
.github/workflows/test-invoke-conda.yml (121 changes)
@@ -3,41 +3,68 @@ on:
   push:
     branches:
       - 'main'
-      - 'development'
-      - 'fix-gh-actions-fork'
   pull_request:
     branches:
       - 'main'
-      - 'development'
+    types:
+      - 'ready_for_review'
+      - 'opened'
+      - 'synchronize'
+      - 'converted_to_draft'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
 
 jobs:
+  fail_if_pull_request_is_draft:
+    if: github.event.pull_request.draft == true
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
+        run: exit 1
+
   matrix:
+    if: github.event.pull_request.draft == false
     strategy:
-      fail-fast: false
       matrix:
         stable-diffusion-model:
-          # - 'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt'
-          - 'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt'
-        os:
-          - ubuntu-latest
-          - macOS-12
+          - 'stable-diffusion-1.5'
+        environment-yaml:
+          - environment-lin-amd.yml
+          - environment-lin-cuda.yml
+          - environment-mac.yml
+          - environment-win-cuda.yml
         include:
-          - os: ubuntu-latest
-            environment-file: environment-lin-cuda.yml
+          - environment-yaml: environment-lin-amd.yml
+            os: ubuntu-22.04
+            curl-command: curl
+            github-env: $GITHUB_ENV
             default-shell: bash -l {0}
-          - os: macOS-12
-            environment-file: environment-mac.yml
+          - environment-yaml: environment-lin-cuda.yml
+            os: ubuntu-22.04
+            curl-command: curl
+            github-env: $GITHUB_ENV
             default-shell: bash -l {0}
-          # - stable-diffusion-model: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
-          #   stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
-          #   stable-diffusion-model-switch: stable-diffusion-1.4
-          - stable-diffusion-model: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
-            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
-            stable-diffusion-model-switch: stable-diffusion-1.5
-    name: ${{ matrix.os }} with ${{ matrix.stable-diffusion-model-switch }}
+          - environment-yaml: environment-mac.yml
+            os: macos-12
+            curl-command: curl
+            github-env: $GITHUB_ENV
+            default-shell: bash -l {0}
+          - environment-yaml: environment-win-cuda.yml
+            os: windows-2022
+            curl-command: curl.exe
+            github-env: $env:GITHUB_ENV
+            default-shell: pwsh
+          - stable-diffusion-model: stable-diffusion-1.5
+            stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
+            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
+            stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
+    name: ${{ matrix.environment-yaml }} on ${{ matrix.os }}
     runs-on: ${{ matrix.os }}
     env:
       CONDA_ENV_NAME: invokeai
+      INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
     defaults:
       run:
         shell: ${{ matrix.default-shell }}
@@ -47,17 +74,19 @@ jobs:
         uses: actions/checkout@v3
 
       - name: create models.yaml from example
-        run: cp configs/models.yaml.example configs/models.yaml
+        run: |
+          mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
+          cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml
 
       - name: create environment.yml
-        run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml
+        run: cp "environments-and-requirements/${{ matrix.environment-yaml }}" environment.yml
 
       - name: Use cached conda packages
         id: use-cached-conda-packages
         uses: actions/cache@v3
         with:
           path: ~/conda_pkgs_dir
-          key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-file) }}
+          key: conda-pkgs-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles(matrix.environment-yaml) }}
 
       - name: Activate Conda Env
         id: activate-conda-env
@@ -69,58 +98,64 @@ jobs:
 
       - name: set test prompt to main branch validation
         if: ${{ github.ref == 'refs/heads/main' }}
-        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV
+        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
 
       - name: set test prompt to development branch validation
         if: ${{ github.ref == 'refs/heads/development' }}
-        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV
+        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
 
       - name: set test prompt to Pull Request validation
         if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
-        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
+        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
 
       - name: Use Cached Stable Diffusion Model
         id: cache-sd-model
         uses: actions/cache@v3
         env:
-          cache-name: cache-${{ matrix.stable-diffusion-model-switch }}
+          cache-name: cache-${{ matrix.stable-diffusion-model }}
         with:
-          path: ${{ matrix.stable-diffusion-model-dl-path }}
+          path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}
           key: ${{ env.cache-name }}
 
-      - name: Download ${{ matrix.stable-diffusion-model-switch }}
+      - name: Download ${{ matrix.stable-diffusion-model }}
         id: download-stable-diffusion-model
         if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
         run: |
-          [[ -d models/ldm/stable-diffusion-v1 ]] \
-            || mkdir -p models/ldm/stable-diffusion-v1
-          curl \
-            -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
-            -o ${{ matrix.stable-diffusion-model-dl-path }} \
-            -L ${{ matrix.stable-diffusion-model }}
+          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
+          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}
 
-      - name: run preload_models.py
+      - name: run configure_invokeai.py
         id: run-preload-models
         run: |
-          python scripts/preload_models.py \
-            --no-interactive
+          python scripts/configure_invokeai.py --skip-sd-weights --yes
+
+      - name: cat invokeai.init
+        id: cat-invokeai
+        run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init
 
       - name: Run the tests
         id: run-tests
+        if: matrix.os != 'windows-2022'
         run: |
           time python scripts/invoke.py \
-            --model ${{ matrix.stable-diffusion-model-switch }} \
-            --from_file ${{ env.TEST_PROMPTS }}
+            --no-patchmatch \
+            --no-nsfw_checker \
+            --model ${{ matrix.stable-diffusion-model }} \
+            --from_file ${{ env.TEST_PROMPTS }} \
+            --root="${{ env.INVOKEAI_ROOT }}" \
+            --outdir="${{ env.INVOKEAI_ROOT }}/outputs"
 
       - name: export conda env
         id: export-conda-env
+        if: matrix.os != 'windows-2022'
         run: |
           mkdir -p outputs/img-samples
-          conda env export --name ${{ env.CONDA_ENV_NAME }} > outputs/img-samples/environment-${{ runner.os }}-${{ runner.arch }}.yml
+          conda env export --name ${{ env.CONDA_ENV_NAME }} > ${{ env.INVOKEAI_ROOT }}/outputs/environment-${{ runner.os }}-${{ runner.arch }}.yml
 
       - name: Archive results
+        if: matrix.os != 'windows-2022'
         id: archive-results
         uses: actions/upload-artifact@v3
         with:
-          name: results_${{ matrix.os }}_${{ matrix.stable-diffusion-model-switch }}
-          path: outputs/img-samples
+          name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
+          path: ${{ env.INVOKEAI_ROOT }}/outputs
.github/workflows/test-invoke-pip.yml (new file, 144 lines)
@@ -0,0 +1,144 @@
name: Test invoke.py pip
on:
  push:
    branches:
      - 'main'
  pull_request:
    branches:
      - 'main'
    types:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
      - 'converted_to_draft'

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  fail_if_pull_request_is_draft:
    if: github.event.pull_request.draft == true
    runs-on: ubuntu-18.04
    steps:
      - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
        run: exit 1
  matrix:
    if: github.event.pull_request.draft == false
    strategy:
      matrix:
        stable-diffusion-model:
          - stable-diffusion-1.5
        requirements-file:
          - requirements-lin-cuda.txt
          - requirements-lin-amd.txt
          - requirements-mac-mps-cpu.txt
          - requirements-win-colab-cuda.txt
        python-version:
          # - '3.9'
          - '3.10'
        include:
          - requirements-file: requirements-lin-cuda.txt
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
          - requirements-file: requirements-lin-amd.txt
            os: ubuntu-22.04
            curl-command: curl
            github-env: $GITHUB_ENV
          - requirements-file: requirements-mac-mps-cpu.txt
            os: macOS-12
            curl-command: curl
            github-env: $GITHUB_ENV
          - requirements-file: requirements-win-colab-cuda.txt
            os: windows-2022
            curl-command: curl.exe
            github-env: $env:GITHUB_ENV
          - stable-diffusion-model: stable-diffusion-1.5
            stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
            stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
            stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
    name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout sources
        id: checkout-sources
        uses: actions/checkout@v3

      - name: set INVOKEAI_ROOT Windows
        if: matrix.os == 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}

      - name: set INVOKEAI_ROOT others
        if: matrix.os != 'windows-2022'
        run: |
          echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
          echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}

      - name: create models.yaml from example
        run: |
          mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
          cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml

      - name: set test prompt to main branch validation
        if: ${{ github.ref == 'refs/heads/main' }}
        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}

      - name: set test prompt to development branch validation
        if: ${{ github.ref == 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}

      - name: set test prompt to Pull Request validation
        if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

      - name: create requirements.txt
        run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'

      - name: setup python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          # cache: 'pip'
          # cache-dependency-path: ${{ matrix.requirements-file }}

      - name: install dependencies
        run: pip3 install --upgrade pip setuptools wheel

      - name: install requirements
        run: pip3 install -r '${{ matrix.requirements-file }}'

      - name: Use Cached Stable Diffusion Model
        id: cache-sd-model
        uses: actions/cache@v3
        env:
          cache-name: cache-${{ matrix.stable-diffusion-model }}
        with:
          path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}
          key: ${{ env.cache-name }}

      - name: Download ${{ matrix.stable-diffusion-model }}
        id: download-stable-diffusion-model
        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
        run: |
          mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
          ${{ matrix.curl-command }} -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" -o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" -L ${{ matrix.stable-diffusion-model-url }}

      - name: run configure_invokeai.py
        id: run-preload-models
        run: python3 scripts/configure_invokeai.py --skip-sd-weights --yes

      - name: Run the tests
        id: run-tests
        if: matrix.os != 'windows-2022'
        run: python3 scripts/invoke.py --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" --outdir="${{ env.INVOKEAI_OUTDIR }}"

      - name: Archive results
        id: archive-results
        if: matrix.os != 'windows-2022'
        uses: actions/upload-artifact@v3
        with:
          name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
          path: ${{ env.INVOKEAI_ROOT }}/outputs
.gitignore (21 changes)
@@ -6,6 +6,7 @@ models/ldm/stable-diffusion-v1/model.ckpt
 # ignore user models config
 configs/models.user.yaml
 config/models.user.yml
+invokeai.init
 
 # ignore the Anaconda/Miniconda installer used while building Docker image
 anaconda.sh
@@ -194,10 +195,6 @@ checkpoints
 
 # Let the frontend manage its own gitignore
 !frontend/*
-frontend/apt-get
-frontend/dist
-frontend/sudo
-frontend/update
 
 # Scratch folder
 .scratch/
@@ -218,7 +215,7 @@ models/clipseg
 models/gfpgan
 
 # ignore initfile
-invokeai.init
+.invokeai
 
 # ignore environment.yml and requirements.txt
 # these are links to the real files in environments-and-requirements
@@ -226,12 +223,14 @@ environment.yml
 requirements.txt
 
 # source installer files
-source_installer/*zip
-source_installer/invokeAI
-install.bat
-install.sh
-update.bat
-update.sh
+installer/*zip
+installer/install.bat
+installer/install.sh
+installer/update.bat
+installer/update.sh
 
 # this may be present if the user created a venv
 invokeai
+
+# no longer stored in source directory
+models
CODE_OF_CONDUCT.md (new file, 128 lines)
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported to the community leaders responsible for enforcement
at https://github.com/invoke-ai/InvokeAI/issues. All complaints will
be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
InvokeAI_Statement_of_Values.md (new file, 84 lines)
@@ -0,0 +1,84 @@
<img src="docs/assets/invoke_ai_banner.png" align="center">

Invoke-AI is a community of software developers, researchers, and user
interface experts who have come together on a voluntary basis to build
software tools which support cutting edge AI text-to-image
applications. This community is open to anyone who wishes to
contribute to the effort and has the skill and time to do so.

# Our Values

The InvokeAI team is a diverse community which includes individuals
from various parts of the world and many walks of life. Despite our
differences, we share a number of core values which we ask prospective
contributors to understand and respect. We believe:

1. That Open Source Software is a positive force in the world. We
create software that can be used, reused, and redistributed, without
restrictions, under a straightforward Open Source license (MIT). We
believe that Open Source benefits society as a whole by increasing the
availability of high quality software to all.

2. That those who create software should receive proper attribution
for their creative work. While we support the exchange and reuse of
Open Source Software, we feel strongly that the original authors of a
piece of code should receive credit for their contribution, and we
endeavor to do so whenever possible.

3. That there is moral ambiguity surrounding AI-assisted art. We are
aware of the moral and ethical issues surrounding the release of the
Stable Diffusion model and similar products. We are aware that, due to
the composition of their training sets, current AI-generated image
models are biased against certain ethnic groups, cultural concepts of
beauty, ethnic stereotypes, and gender roles.

1. We recognize the potential for harm to these groups that these biases
represent and trust that future AI models will take steps towards
reducing or eliminating the biases noted above, respect and give due
credit to the artists whose work is sourced, and call on developers
and users to favor these models over the older ones as they become
available.

4. We are deeply committed to ensuring that this technology benefits
everyone, including artists. We see AI art not as a replacement for
the artist, but rather as a tool to empower them. With that
in mind, we are constantly debating how to build systems that put
artists’ needs first: tools which can be readily integrated into an
artist’s existing workflows and practices, enhancing their work and
helping them to push it further. Every decision we take as a team,
which includes several artists, aims to build towards that goal.

5. That artificial intelligence can be a force for good in the world,
but must be used responsibly. Artificial intelligence technologies
have the potential to improve society, in everything from cancer care,
to customer service, to creative writing.

1. While we do not believe that software should arbitrarily limit what
users can do with it, we recognize that when used irresponsibly, AI
has the potential to do much harm. Our Discord server is actively
moderated in order to minimize the potential of harm from
user-contributed images. In addition, we ask users of our software to
refrain from using it in any way that would cause mental, emotional or
physical harm to individuals and vulnerable populations including (but
not limited to) women; minors; ethnic minorities; religious groups;
members of LGBTQIA communities; and people with disabilities or
impairments.

2. Note that some of the image generation AI models which the Invoke-AI
toolkit supports carry licensing agreements which impose restrictions
on how the model is used. We ask that our users read and agree to
these terms if they wish to make use of these models. These agreements
are distinct from the MIT license which applies to the InvokeAI
software and source code.

6. That mutual respect is key to a healthy software development
community. Members of the InvokeAI community are expected to treat
each other with respect, beneficence, and empathy. Each of us has a
different background and a unique set of skills. We strive to help
each other grow and gain new skills, and we apportion expectations in
a way that balances the members' time, skillset, and interest
area. Disputes are resolved by open and honest communication.

## Signature

This document has been collectively crafted and approved by the current InvokeAI team members, as of 28 Nov 2022: **lstein** (Lincoln Stein), **blessedcoolant**, **hipsterusername** (Kent Keirsey), **Kyle0654** (Kyle Schouviller), **damian0815**, **mauwii** (Matthias Wild), **Netsvetaev** (Artur Netsvetaev), **psychedelicious**, **tildebyte**, **keturn**, and **ebr** (Eugene Brodsky). Although individuals within the group may hold differing views on particular details and/or their implications, we are all in agreement about its fundamental statements, as well as their significance and importance to this project moving forward.
README.md (98 changes)
@@ -1,11 +1,9 @@
 <div align="center">
 
+
+
 # InvokeAI: A Stable Diffusion Toolkit
 
-_Formerly known as lstein/stable-diffusion_
-
-
-
 [![discord badge]][discord link]
 
 [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
@@ -38,18 +36,33 @@ This is a fork of
 [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
 the open source text-to-image generator. It provides a streamlined
 process with various new features and options to aid the image
-generation process. It runs on Windows, Mac and Linux machines, with
+generation process. It runs on Windows, macOS and Linux machines, with
 GPU cards with as little as 4 GB of RAM. It provides both a polished
 Web interface (see below), and an easy-to-use command-line interface.
 
-**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
+**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
+
+_Note: InvokeAI is rapidly evolving. Please use the
+[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
+requests. Be sure to use the provided templates. They will help us diagnose issues faster._
+
+# Getting Started with InvokeAI
+
+For full installation and upgrade instructions, please see:
+[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
+
+1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
+2. Download the .zip file for your OS (Windows/macOS/Linux).
+3. Unzip the file.
+4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
+5. Wait a while, until it is done.
+6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
+7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
+8. Type `banana sushi` in the box on the top left and click `Invoke`:
 
 <div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>
 
-
-_Note: This fork is rapidly evolving. Please use the
-[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
-requests. Be sure to use the provided templates. They will help aid diagnose issues faster._
-
 ## Table of Contents
@@ -69,10 +82,13 @@ This fork is supported across Linux, Windows and Macintosh. Linux
 users can use either an Nvidia-based card (with CUDA support) or an
 AMD card (using the ROCm driver). For full installation and upgrade
 instructions, please see:
-[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
+[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
 
 ### Hardware Requirements
 
+InvokeAI is supported across Linux, Windows and macOS. Linux
+users can use either an Nvidia-based card (with CUDA support) or an
+AMD card (using the ROCm driver).
 #### System
 
 You wil need one of the following:
@@ -80,6 +96,10 @@ You wil need one of the following:
 - An NVIDIA-based graphics card with 4 GB or more VRAM memory.
 - An Apple computer with an M1 chip.
 
+We do not recommend the GTX 1650 or 1660 series video cards. They are
+unable to run in half-precision mode and do not have sufficient VRAM
+to render 512x512 images.
+
 #### Memory
 
 - At least 12 GB Main Memory RAM.
@@ -97,11 +117,12 @@ Similarly, specify full-precision mode on Apple M1 hardware.
 
 Precision is auto configured based on the device. If however you encounter
 errors like 'expected type Float but found Half' or 'not implemented for Half'
-you can try starting `invoke.py` with the `--precision=float32` flag:
+you can try starting `invoke.py` with the `--precision=float32` flag to your initialization command
 
 ```bash
 (invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
 ```
+Or by updating your InvokeAI configuration file with this argument.
 
 ### Features
 
@@ -130,39 +151,7 @@ you can try starting `invoke.py` with the `--precision=float32` flag:
 
 ### Latest Changes
 
-- v2.0.1 (13 October 2022)
-  - fix noisy images at high step count when using k* samplers
-  - dream.py script now calls invoke.py module directly rather than
-    via a new python process (which could break the environment)
-
-- v2.0.0 (9 October 2022)
-
-  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
-    for backward compatibility.
-  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
-  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
-  - img2img runs on all k* samplers
-  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
-  - Support for CodeFormer face reconstruction
-  - Support for Textual Inversion on Macintoshes
-  - Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
-    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
-    and "embiggen" upscaling. See the `!fix` command.
-  - New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
-  - New `--perlin` and `--threshold` options allow you to add and control variation
-    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
-  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
-    and tweaking of previous settings.
-  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
-  - Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
-    New commands added:
-    - List command-line history with `!history`
-    - Search command-line history with `!search`
-    - Clear history with `!clear`
-  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
-    configure. To switch away from auto use the new flag like `--precision=float32`.
-
-For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.
+For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases)
 
 ### Troubleshooting
 
@@ -172,14 +161,19 @@ problems and other issues.
 # Contributing
 
 Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
-cleanup, testing, or code reviews, is very much encouraged to do so. If you are unfamiliar with how
-to contribute to GitHub projects, here is a
-[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).
+cleanup, testing, or code reviews, is very much encouraged to do so.
 
-A full set of contribution guidelines, along with templates, are in progress, but for now the most
-important thing is to **make your pull request against the "development" branch**, and not against
-"main". This will help keep public breakage to a minimum and will allow you to propose more radical
-changes.
+To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
+
+If you are unfamiliar with how
+to contribute to GitHub projects, here is a
+[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
+
+We hope you enjoy using our software as much as we enjoy creating it,
+and we hope that some of those of you who are reading this will elect
+to become part of our community.
+
+Welcome to InvokeAI!
 
 ### Contributors
 
@@ -192,7 +186,7 @@ their time, hard work and effort.
 For support, please use this repository's GitHub Issues tracking service. Feel free to send me an
 email if you use and like the script.
 
-Original portions of the software are Copyright (c) 2020
+Original portions of the software are Copyright (c) 2022
 [Lincoln D. Stein](https://github.com/lstein)
 
 ### Further Reading
||||||
|
backend/modules/__init__.py (new file, 0 lines)

backend/modules/get_canvas_generation_mode.py (new file, 117 lines)
@@ -0,0 +1,117 @@
from PIL import Image, ImageChops
from PIL.Image import Image as ImageType
from typing import Union, Literal

# https://stackoverflow.com/questions/43864101/python-pil-check-if-image-is-transparent
def check_for_any_transparency(img: Union[ImageType, str]) -> bool:
    if type(img) is str:
        img = Image.open(img)

    if img.info.get("transparency", None) is not None:
        return True
    if img.mode == "P":
        transparent = img.info.get("transparency", -1)
        for _, index in img.getcolors():
            if index == transparent:
                return True
    elif img.mode == "RGBA":
        extrema = img.getextrema()
        if extrema[3][0] < 255:
            return True
    return False


def get_canvas_generation_mode(
    init_img: Union[ImageType, str], init_mask: Union[ImageType, str]
) -> Literal["txt2img", "outpainting", "inpainting", "img2img",]:
    if type(init_img) is str:
        init_img = Image.open(init_img)

    if type(init_mask) is str:
        init_mask = Image.open(init_mask)

    init_img = init_img.convert("RGBA")

    # Get alpha from init_img
    init_img_alpha = init_img.split()[-1]
    init_img_alpha_mask = init_img_alpha.convert("L")
    init_img_has_transparency = check_for_any_transparency(init_img)

    if init_img_has_transparency:
        init_img_is_fully_transparent = (
            True if init_img_alpha_mask.getbbox() is None else False
        )

    """
    Mask images are white in areas where no change should be made, black where changes
    should be made.
    """

    # Fit the mask to init_img's size and convert it to greyscale
    init_mask = init_mask.resize(init_img.size).convert("L")

    """
    PIL.Image.getbbox() returns the bounding box of non-zero areas of the image, so we first
    invert the mask image so that masked areas are white and other areas black == zero.
    getbbox() now tells us if there are any masked areas.
    """
    init_mask_bbox = ImageChops.invert(init_mask).getbbox()
    init_mask_exists = False if init_mask_bbox is None else True

    if init_img_has_transparency:
        if init_img_is_fully_transparent:
            return "txt2img"
        else:
            return "outpainting"
    else:
        if init_mask_exists:
            return "inpainting"
        else:
            return "img2img"


def main():
    # Testing
    init_img_opaque = "test_images/init-img_opaque.png"
    init_img_partial_transparency = "test_images/init-img_partial_transparency.png"
    init_img_full_transparency = "test_images/init-img_full_transparency.png"
    init_mask_no_mask = "test_images/init-mask_no_mask.png"
    init_mask_has_mask = "test_images/init-mask_has_mask.png"

    print(
        "OPAQUE IMAGE, NO MASK, expect img2img, got ",
        get_canvas_generation_mode(init_img_opaque, init_mask_no_mask),
    )

    print(
        "IMAGE WITH TRANSPARENCY, NO MASK, expect outpainting, got ",
        get_canvas_generation_mode(
            init_img_partial_transparency, init_mask_no_mask
        ),
    )

    print(
        "FULLY TRANSPARENT IMAGE NO MASK, expect txt2img, got ",
        get_canvas_generation_mode(init_img_full_transparency, init_mask_no_mask),
    )

    print(
        "OPAQUE IMAGE, WITH MASK, expect inpainting, got ",
        get_canvas_generation_mode(init_img_opaque, init_mask_has_mask),
    )

    print(
        "IMAGE WITH TRANSPARENCY, WITH MASK, expect outpainting, got ",
        get_canvas_generation_mode(
            init_img_partial_transparency, init_mask_has_mask
        ),
    )

    print(
        "FULLY TRANSPARENT IMAGE WITH MASK, expect txt2img, got ",
        get_canvas_generation_mode(init_img_full_transparency, init_mask_has_mask),
    )


if __name__ == "__main__":
    main()
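Below is a brief, hedged usage sketch of the helper added above; it is not part of the diff. It assumes the module is importable as `backend.modules.get_canvas_generation_mode` and uses hypothetical file names.

```python
# Hedged sketch: classify a unified-canvas request using the new helper.
# Module path and file names are assumptions for illustration only.
from PIL import Image
from backend.modules.get_canvas_generation_mode import get_canvas_generation_mode

init_img = Image.open("my_canvas_layer.png")   # hypothetical base image
init_mask = Image.open("my_mask_layer.png")    # hypothetical mask image

mode = get_canvas_generation_mode(init_img, init_mask)
print(mode)  # one of: "txt2img", "img2img", "inpainting", "outpainting"
```

The helper accepts either in-memory PIL images or file path strings, so the same call works with paths as in the `main()` test above.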
@@ -5,6 +5,8 @@ SAMPLER_CHOICES = [
     "ddim",
     "k_dpm_2_a",
     "k_dpm_2",
+    "k_dpmpp_2_a",
+    "k_dpmpp_2",
     "k_euler_a",
     "k_euler",
     "k_heun",
BIN  backend/modules/test_images/init-img_full_transparency.png (new file, 2.7 KiB)
BIN  backend/modules/test_images/init-img_opaque.png (new file, 292 KiB)
BIN  backend/modules/test_images/init-img_partial_transparency.png (new file, 164 KiB)
BIN  backend/modules/test_images/init-mask_has_mask.png (new file, 9.5 KiB)
BIN  backend/modules/test_images/init-mask_no_mask.png (new file, 3.4 KiB)
BIN  binary_installer/WinLongPathsEnabled.reg (new file)
@@ -10,10 +10,15 @@
 @rem This enables a user to install this project without manually installing git or Python

+@rem change to the script's directory
+PUSHD "%~dp0"
+
+set "no_cache_dir=--no-cache-dir"
+if "%1" == "use-cache" (
+    set "no_cache_dir="
+)

 echo ***** Installing InvokeAI.. *****

-set PATH=c:\windows\system32

 @rem Config
 set INSTALL_ENV_DIR=%cd%\installer_files\env
 @rem https://mamba.readthedocs.io/en/latest/installation.html
@@ -65,7 +70,8 @@ set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%

 @rem Download/unpack/clean up InvokeAI release sourceball
 set err_msg=----- InvokeAI source download failed -----
-curl -L %RELEASE_URL%/%RELEASE_SOURCEBALL% --output InvokeAI.tgz
+echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
+curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
 if %errorlevel% neq 0 goto err_exit

 set err_msg=----- InvokeAI source unpack failed -----
@@ -103,11 +109,8 @@ echo ***** Unpacked python-build-standalone *****
 @rem create venv
 set err_msg=----- problem creating venv -----
 .\python\python -E -s -m venv .venv
-@rem In reality, the following is ALL that 'activate.bat' does,
-@rem aside from setting the prompt, which we don't care about
-set PYTHONPATH=
-set PATH=.venv\Scripts;%PATH%
 if %errorlevel% neq 0 goto err_exit
+call .venv\Scripts\activate.bat

 echo ***** Created Python virtual environment *****

@@ -118,43 +121,40 @@ echo We're running under
 if %errorlevel% neq 0 goto err_exit

 set err_msg=----- pip update failed -----
-.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location --upgrade pip
+.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
 if %errorlevel% neq 0 goto err_exit

-echo ***** Updated pip *****
+echo ***** Updated pip and wheel *****

 set err_msg=----- requirements file copy failed -----
-copy installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
+copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
 if %errorlevel% neq 0 goto err_exit

 set err_msg=----- main pip install failed -----
-.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location -r requirements.txt
-if %errorlevel% neq 0 goto err_exit
-
-set err_msg=----- clipseg install failed -----
-.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
-if %errorlevel% neq 0 goto err_exit
-
-set err_msg=----- InvokeAI setup failed -----
-.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location -e .
+.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
 if %errorlevel% neq 0 goto err_exit

 echo ***** Installed Python dependencies *****

+set err_msg=----- InvokeAI setup failed -----
+.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
+if %errorlevel% neq 0 goto err_exit
+
+copy binary_installer\invoke.bat.in .\invoke.bat
+echo ***** Installed invoke launcher script ******
+
+@rem more cleanup
+rd /s /q binary_installer installer_files
+
 @rem preload the models
-call .venv\Scripts\python scripts\preload_models.py
+call .venv\Scripts\python scripts\configure_invokeai.py
 set err_msg=----- model download clone failed -----
 if %errorlevel% neq 0 goto err_exit
+deactivate

 echo ***** Finished downloading models *****

-echo ***** Installing invoke.bat ******
-copy installer\invoke.bat .\invoke.bat
 echo All done! Execute the file invoke.bat in this directory to start InvokeAI

-@rem more cleanup
-rd /s /q installer installer_files
-
 pause
 exit
installer/install.sh → binary_installer/install.sh.in (82 changes; Executable file → Normal file)
@@ -1,5 +1,9 @@
 #!/usr/bin/env bash

+# ensure we're in the correct folder in case user's CWD is somewhere else
+scriptdir=$(dirname "$0")
+cd "$scriptdir"
+
 set -euo pipefail
 IFS=$'\n\t'

@@ -22,14 +26,21 @@ function _err_exit {

 # This enables a user to install this project without manually installing git or Python

-echo -e "\n***** Installing InvokeAI... *****\n"
+echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"

+export no_cache_dir="--no-cache-dir"
+if [ $# -ge 1 ]; then
+    if [ "$1" = "use-cache" ]; then
+        export no_cache_dir=""
+    fi
+fi
+
 OS_NAME=$(uname -s)
 case "${OS_NAME}" in
     Linux*) OS_NAME="linux";;
     Darwin*) OS_NAME="darwin";;
-    *) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or MacOS -----\n" && exit
+    *) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
 esac

 OS_ARCH=$(uname -m)

@@ -81,6 +92,7 @@ if [ "$OS_NAME" == "darwin" ]; then
 elif [ "$OS_NAME" == "linux" ]; then
     PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
 fi
+echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"

 PACKAGES_TO_INSTALL=""

@@ -93,20 +105,20 @@ if [ "$PACKAGES_TO_INSTALL" != "" ]; then

     curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba

-    chmod u+x "micromamba"
+    chmod u+x ./micromamba

     # test the mamba binary
     echo -e "\n***** Micromamba version: *****\n"
-    "micromamba" --version
+    ./micromamba --version

     # create the installer env
     if [ ! -e "$INSTALL_ENV_DIR" ]; then
-        "micromamba" create -y --prefix "$INSTALL_ENV_DIR"
+        ./micromamba create -y --prefix "$INSTALL_ENV_DIR"
     fi

     echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"

-    "micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL
+    ./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"

     if [ ! -e "$INSTALL_ENV_DIR" ]; then
         echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"

@@ -154,12 +166,22 @@ echo -e "\n***** Unpacked python-build-standalone *****\n"

 # create venv
 _err_msg="\n----- problem creating venv -----\n"

+if [ "$OS_NAME" == "darwin" ]; then
+    # patch sysconfig so that extensions can build properly
+    # adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
+    PYTHON_INSTALL_DIR="$(pwd)/python"
+    SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
+    TMPFILE="$(mktemp)"
+    chmod +w "${SYSCONFIG}"
+    cp "${SYSCONFIG}" "${TMPFILE}"
+    sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
+    rm -f "${TMPFILE}"
+fi
+
 ./python/bin/python3 -E -s -m venv .venv
 _err_exit $? _err_msg
-# In reality, the following is ALL that 'activate.bat' does,
-# aside from setting the prompt, which we don't care about
-export PYTHONPATH=
-export PATH=.venv/bin:$PATH
+source .venv/bin/activate

 echo -e "\n***** Created Python virtual environment *****\n"

@@ -170,42 +192,44 @@ echo -e "We're running under"
 _err_exit $? _err_msg

 _err_msg="\n----- pip update failed -----\n"
-.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location --upgrade pip
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
 _err_exit $? _err_msg

 echo -e "\n***** Updated pip *****\n"

 _err_msg="\n----- requirements file copy failed -----\n"
-cp installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
+cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
 _err_exit $? _err_msg

 _err_msg="\n----- main pip install failed -----\n"
-.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location -r requirements.txt
-_err_exit $? _err_msg
-
-_err_msg="\n----- clipseg install failed -----\n"
-.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
-_err_exit $? _err_msg
-
-_err_msg="\n----- InvokeAI setup failed -----\n"
-.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location -e .
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
 _err_exit $? _err_msg

 echo -e "\n***** Installed Python dependencies *****\n"

+_err_msg="\n----- InvokeAI setup failed -----\n"
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
+_err_exit $? _err_msg
+
+echo -e "\n***** Installed InvokeAI *****\n"
+
+cp binary_installer/invoke.sh.in ./invoke.sh
+chmod a+rx ./invoke.sh
+echo -e "\n***** Installed invoke launcher script ******\n"
+
+# more cleanup
+rm -rf binary_installer/ installer_files/
+
 # preload the models
-.venv/bin/python3 scripts/preload_models.py
+.venv/bin/python3 scripts/configure_invokeai.py
 _err_msg="\n----- model download clone failed -----\n"
 _err_exit $? _err_msg
+deactivate

 echo -e "\n***** Finished downloading models *****\n"

-echo -e "\n***** Installing invoke.sh ******\n"
-cp installer/invoke.sh .
-
-# more cleanup
-rm -rf installer/ installer_files/
-
-echo "All done! Run the command './invoke.sh' to start InvokeAI."
+echo "All done! Run the command"
+echo "    $scriptdir/invoke.sh"
+echo "to start InvokeAI."
 read -p "Press any key to exit..."
 exit
binary_installer/invoke.bat.in (new file, 36 lines)
@@ -0,0 +1,36 @@
@echo off

PUSHD "%~dp0"
call .venv\Scripts\activate.bat

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo OR
echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
    echo Starting the InvokeAI command-line.
    .venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
    echo Starting the InvokeAI browser-based UI.
    .venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
    echo Developer Console
    echo Python command is:
    where python
    echo Python version is:
    python --version
    echo *************************
    echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
    echo so that you can troubleshoot this InvokeAI installation as necessary.
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) else (
    echo Invalid selection
    pause
    exit /b
)

deactivate
binary_installer/invoke.sh.in (new file, 46 lines)
@@ -0,0 +1,46 @@
#!/usr/bin/env sh

set -eu

. .venv/bin/activate

# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "OR"
echo "3. open the developer console"
echo "Please enter 1, 2, or 3:"
read choice

case $choice in
    1)
        printf "\nStarting the InvokeAI command-line..\n";
        .venv/bin/python scripts/invoke.py $*;
        ;;
    2)
        printf "\nStarting the InvokeAI browser-based UI..\n";
        .venv/bin/python scripts/invoke.py --web $*;
        ;;
    3)
        printf "\nDeveloper Console:\n";
        printf "Python command is:\n\t";
        which python;
        printf "Python version is:\n\t";
        python --version;
        echo "*************************"
        echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
        echo "so that you can troubleshoot this InvokeAI installation as necessary.";
        printf "*************************\n"
        echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
        /usr/bin/env "$SHELL";
        ;;
    *)
        echo "Invalid selection";
        exit
        ;;
esac
@@ -2,8 +2,9 @@
 # This file is autogenerated by pip-compile with python 3.10
 # To update, run:
 #
-# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-darwin-arm64-mps-reqs.txt requirements.in
+# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-darwin-arm64-mps-reqs.txt installer/requirements.in
 #
+--extra-index-url https://download.pytorch.org/whl/torch_stable.html
 --extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https
@@ -13,10 +14,12 @@ absl-py==1.3.0 \
     # via
     #   tb-nightly
     #   tensorboard
-accelerate==0.13.2 \
-    --hash=sha256:dd6f08b010077f252dda5a7699d87b02885335c456770939c536e65ff07ed760 \
-    --hash=sha256:e22180d7094e4c1bfb05a2b078297c222f6b4fa595fde8916946c3f377cdf019
-    # via k-diffusion
+accelerate==0.14.0 \
+    --hash=sha256:31c5bcc40564ef849b5bc1c4424a43ccaf9e26413b7df89c2e36bf81f070fd44 \
+    --hash=sha256:b15d562c0889d0cf441b01faa025dfc29b163d061b6cc7d489c2c83b0a55ffab
+    # via
+    #   -r installer/requirements.in
+    #   k-diffusion
 addict==2.4.0 \
     --hash=sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc \
     --hash=sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494
@@ -117,7 +120,7 @@ aiosignal==1.2.0 \
 albumentations==1.3.0 \
     --hash=sha256:294165d87d03bc8323e484927f0a5c1a3c64b0e7b9c32a979582a6c93c363bdf \
     --hash=sha256:be1af36832c8893314f2a5550e8ac19801e04770734c1b70fa3c996b41f37bed
-    # via -r requirements.in
+    # via -r installer/requirements.in
 altair==4.2.0 \
     --hash=sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a \
     --hash=sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026
@@ -148,6 +151,10 @@ blinker==1.5 \
     --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
     --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
     # via streamlit
+boltons==21.0.0 \
+    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
+    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
+    # via torchsde
 cachetools==5.2.0 \
     --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
     --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
@@ -180,13 +187,12 @@ click==8.1.3 \
     #   wandb
 clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
     --hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
-    # via -r requirements.in
-colorama==0.4.6 \
-    --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
-    --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
     # via
-    #   click
-    #   tqdm
+    #   -r installer/requirements.in
+    #   clipseg
+clipseg @ https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip \
+    --hash=sha256:14f43ed42f90be3fe57f06de483cb8be0f67f87a6f62a011339d45a39f4b4189
+    # via -r installer/requirements.in
 commonmark==0.9.1 \
     --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
     --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9
@@ -273,7 +279,7 @@ decorator==5.1.1 \
 diffusers==0.7.2 \
     --hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
     --hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
-    # via -r requirements.in
+    # via -r installer/requirements.in
 dnspython==2.2.1 \
     --hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
     --hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f
@@ -293,7 +299,7 @@ entrypoints==0.4 \
 eventlet==0.33.1 \
     --hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
     --hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
-    # via -r requirements.in
+    # via -r installer/requirements.in
 facexlib==0.2.5 \
     --hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
     --hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d
@@ -319,15 +325,15 @@ flask==2.2.2 \
 flask-cors==3.0.10 \
     --hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
     --hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
-    # via -r requirements.in
+    # via -r installer/requirements.in
 flask-socketio==5.3.1 \
     --hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
     --hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
-    # via -r requirements.in
+    # via -r installer/requirements.in
 flaskwebgui==0.3.7 \
     --hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
     --hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
-    # via -r requirements.in
+    # via -r installer/requirements.in
 fonttools==4.38.0 \
     --hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
     --hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb
@@ -411,11 +417,11 @@ future==0.18.2 \
 getpass-asterisk==1.0.1 \
     --hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
     --hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
-    # via -r requirements.in
+    # via -r installer/requirements.in
 gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
     --hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
     # via
-    #   -r requirements.in
+    #   -r installer/requirements.in
     #   realesrgan
 gitdb==4.0.9 \
     --hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \
@@ -576,7 +582,7 @@ imageio-ffmpeg==0.4.7 \
     --hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
     --hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
     --hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
-    # via -r requirements.in
+    # via -r installer/requirements.in
 importlib-metadata==5.0.0 \
     --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
     --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43
@@ -607,9 +613,9 @@ jsonschema==4.17.0 \
     # via
     #   altair
     #   jsonmerge
-k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
-    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
-    # via -r requirements.in
+k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
+    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
+    # via -r installer/requirements.in
 kiwisolver==1.4.4 \
     --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
     --hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
@@ -837,7 +843,9 @@ matplotlib==3.6.2 \
     --hash=sha256:ec9be0f4826cdb3a3a517509dcc5f87f370251b76362051ab59e42b6b765f8c4 \
     --hash=sha256:f04f97797df35e442ed09f529ad1235d1f1c0f30878e2fe09a2676b71a8801e0 \
     --hash=sha256:f41e57ad63d336fe50d3a67bb8eaa26c09f6dda6a59f76777a99b8ccd8e26aec
-    # via filterpy
+    # via
+    #   clipseg
+    #   filterpy
 multidict==6.0.2 \
     --hash=sha256:0327292e745a880459ef71be14e709aaea2f783f3537588fb4ed09b6c01bca60 \
     --hash=sha256:041b81a5f6b38244b34dc18c7b6aba91f9cdaf854d9a39e5ff0b58e2b5773b9c \
@@ -970,6 +978,7 @@ numpy==1.23.4 \
     #   altair
     #   basicsr
     #   clean-fid
+    #   clipseg
     #   contourpy
     #   diffusers
     #   facexlib
@@ -983,6 +992,7 @@ numpy==1.23.4 \
     #   pandas
     #   pyarrow
     #   pydeck
+    #   pypatchmatch
    #   pytorch-lightning
     #   pywavelets
     #   qudida
@@ -998,6 +1008,7 @@ numpy==1.23.4 \
     #   tifffile
     #   torch-fidelity
     #   torchmetrics
+    #   torchsde
     #   torchvision
     #   transformers
 oauthlib==3.2.2 \
@@ -1018,6 +1029,7 @@ opencv-python==4.6.0.66 \
     --hash=sha256:f482e78de6e7b0b060ff994ffd859bddc3f7f382bb2019ef157b0ea8ca8712f5
     # via
     #   basicsr
+    #   clipseg
     #   facexlib
     #   gfpgan
     #   realesrgan
@@ -1080,6 +1092,10 @@ pandas==1.5.1 \
 pathtools==0.1.2 \
     --hash=sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0
     # via wandb
+picklescan==0.0.5 \
+    --hash=sha256:368cf1b9a075bc1b6460ad82b694f260532b836c82f99d13846cd36e1bbe7f9a \
+    --hash=sha256:57153eca04d5df5009f2cdd595aef261b8a6f27e03046a1c84f672aa6869c592
+    # via -r installer/requirements.in
 pillow==9.3.0 \
     --hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
     --hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
@@ -1150,6 +1166,7 @@ pillow==9.3.0 \
     #   imageio
     #   k-diffusion
     #   matplotlib
+    #   pypatchmatch
     #   realesrgan
     #   scikit-image
     #   streamlit
@@ -1285,10 +1302,13 @@ pyparsing==3.0.9 \
     # via
     #   matplotlib
     #   packaging
+pypatchmatch @ https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip \
+    --hash=sha256:4ad6ec95379e7d122d494ff76633cc7cf9b71330d5efda147fceba81e3dc6cd2
+    # via -r installer/requirements.in
 pyreadline3==3.4.1 \
     --hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
     --hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
-    # via -r requirements.in
+    # via -r installer/requirements.in
 pyrsistent==0.19.2 \
     --hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
     --hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \
@@ -1425,7 +1445,7 @@ qudida==0.0.4 \
 realesrgan==0.3.0 \
     --hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
     --hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
-    # via -r requirements.in
+    # via -r installer/requirements.in
 regex==2022.10.31 \
     --hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
     --hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \
@@ -1631,6 +1651,7 @@ scipy==1.9.3 \
     #   albumentations
     #   basicsr
     #   clean-fid
+    #   clipseg
     #   facexlib
     #   filterpy
     #   gfpgan
@@ -1639,6 +1660,7 @@ scipy==1.9.3 \
     #   scikit-learn
     #   torch-fidelity
     #   torchdiffeq
+    #   torchsde
 semver==2.13.0 \
     --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
     --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f
@@ -1646,7 +1668,7 @@ semver==2.13.0 \
 send2trash==1.8.0 \
     --hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
     --hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
-    # via -r requirements.in
+    # via -r installer/requirements.in
 sentry-sdk==1.10.1 \
     --hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
     --hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691
@@ -1737,11 +1759,11 @@ smmap==5.0.0 \
 streamlit==1.14.0 \
     --hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
     --hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
-    # via -r requirements.in
+    # via -r installer/requirements.in
 taming-transformers-rom1504==0.0.6 \
     --hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
     --hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
-    # via -r requirements.in
+    # via -r installer/requirements.in
 tb-nightly==2.11.0a20221106 \
     --hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
     # via
@@ -1766,7 +1788,7 @@ tensorboard-plugin-wit==1.8.1 \
     #   tensorboard
 test-tube==0.7.5 \
     --hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
-    # via -r requirements.in
+    # via -r installer/requirements.in
 threadpoolctl==3.1.0 \
     --hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
     --hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380
@@ -1816,29 +1838,29 @@ toolz==0.12.0 \
     --hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
     --hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
     # via altair
-torch==1.13.0 \
-    --hash=sha256:0fdd38c96230947b1ed870fed4a560252f8d23c3a2bf4dab9d2d42b18f2e67c8 \
-    --hash=sha256:220325d0f4e69ee9edf00c04208244ef7cf22ebce083815ce272c7491f0603f5 \
-    --hash=sha256:43db0723fc66ad6486f86dc4890c497937f7cd27429f28f73fb7e4d74b7482e2 \
-    --hash=sha256:47fe6228386bff6d74319a2ffe9d4ed943e6e85473d78e80502518c607d644d2 \
-    --hash=sha256:49a949b8136b32b2ec0724cbf4c6678b54e974b7d68f19f1231eea21cde5c23b \
-    --hash=sha256:4a378f5091307381abfb30eb821174e12986f39b1cf7c4522bf99155256819eb \
-    --hash=sha256:635dbb99d981a6483ca533b3dc7be18ef08dd9e1e96fb0bb0e6a99d79e85a130 \
-    --hash=sha256:6c227c16626e4ce766cca5351cc62a2358a11e8e466410a298487b9dff159eb1 \
-    --hash=sha256:857c7d5b1624c5fd979f66d2b074765733dba3f5e1cc97b7d6909155a2aae3ce \
-    --hash=sha256:9197ec216833b836b67e4d68e513d31fb38d9789d7cd998a08fba5b499c38454 \
-    --hash=sha256:922a4910613b310fbeb87707f00cb76fec328eb60cc1349ed2173e7c9b6edcd8 \
-    --hash=sha256:9ac382cedaf2f70afea41380ad8e7c06acef6b5b7e2aef3971cdad666ca6e185 \
-    --hash=sha256:bb33a911460475d1594a8c8cb73f58c08293211760796d99cae8c2509b86d7f1 \
-    --hash=sha256:cd1e67db6575e1b173a626077a54e4911133178557aac50683db03a34e2b636a \
-    --hash=sha256:d2d2753519415d154de4d3e64d2eaaeefdba6b6fd7d69d5ffaef595988117700 \
-    --hash=sha256:e20df14d874b024851c58e8bb3846249cb120e677f7463f60c986e3661f88680 \
-    --hash=sha256:e643ac8d086706e82f77b5d4dfcf145a9dd37b69e03e64177fc23821754d2ed7 \
-    --hash=sha256:ef934a21da6f6a516d0a9c712a80d09c56128abdc6af8dc151bee5199b4c3b4e \
-    --hash=sha256:f01a9ae0d4b69d2fc4145e8beab45b7877342dddbd4838a7d3c11ca7f6680745 \
-    --hash=sha256:f68edfea71ade3862039ba66bcedf954190a2db03b0c41a9b79afd72210abd97 \
-    --hash=sha256:fa768432ce4b8ffa29184c79a3376ab3de4a57b302cdf3c026a6be4c5a8ab75b
+torch==1.12.1 ; platform_system == "Darwin" \
+    --hash=sha256:03e31c37711db2cd201e02de5826de875529e45a55631d317aadce2f1ed45aa8 \
+    --hash=sha256:0b44601ec56f7dd44ad8afc00846051162ef9c26a8579dda0a02194327f2d55e \
+    --hash=sha256:42e115dab26f60c29e298559dbec88444175528b729ae994ec4c65d56fe267dd \
+    --hash=sha256:42f639501928caabb9d1d55ddd17f07cd694de146686c24489ab8c615c2871f2 \
+    --hash=sha256:4e1b9c14cf13fd2ab8d769529050629a0e68a6fc5cb8e84b4a3cc1dd8c4fe541 \
+    --hash=sha256:68104e4715a55c4bb29a85c6a8d57d820e0757da363be1ba680fa8cc5be17b52 \
+    --hash=sha256:69fe2cae7c39ccadd65a123793d30e0db881f1c1927945519c5c17323131437e \
+    --hash=sha256:6cf6f54b43c0c30335428195589bd00e764a6d27f3b9ba637aaa8c11aaf93073 \
+    --hash=sha256:743784ccea0dc8f2a3fe6a536bec8c4763bd82c1352f314937cb4008d4805de1 \
+    --hash=sha256:8a34a2fbbaa07c921e1b203f59d3d6e00ed379f2b384445773bd14e328a5b6c8 \
+    --hash=sha256:976c3f997cea38ee91a0dd3c3a42322785414748d1761ef926b789dfa97c6134 \
+    --hash=sha256:9b356aea223772cd754edb4d9ecf2a025909b8615a7668ac7d5130f86e7ec421 \
+    --hash=sha256:9c038662db894a23e49e385df13d47b2a777ffd56d9bcd5b832593fab0a7e286 \
+    --hash=sha256:a8320ba9ad87e80ca5a6a016e46ada4d1ba0c54626e135d99b2129a4541c509d \
+    --hash=sha256:b5dbcca369800ce99ba7ae6dee3466607a66958afca3b740690d88168752abcf \
+    --hash=sha256:bfec2843daa654f04fda23ba823af03e7b6f7650a873cdb726752d0e3718dada \
+    --hash=sha256:cd26d8c5640c3a28c526d41ccdca14cf1cbca0d0f2e14e8263a7ac17194ab1d2 \
+    --hash=sha256:e9c8f4a311ac29fc7e8e955cfb7733deb5dbe1bdaabf5d4af2765695824b7e0d \
+    --hash=sha256:f00c721f489089dc6364a01fd84906348fe02243d0af737f944fddb36003400d \
+    --hash=sha256:f3b52a634e62821e747e872084ab32fbcb01b7fa7dbb7471b6218279f02a178a
     # via
+    #   -r installer/requirements.in
     #   accelerate
     #   basicsr
     #   clean-fid
@@ -1854,11 +1876,12 @@ torch==1.13.0 \
     #   torch-fidelity
     #   torchdiffeq
     #   torchmetrics
+    #   torchsde
     #   torchvision
 torch-fidelity==0.3.0 \
     --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
     --hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
-    # via -r requirements.in
+    # via -r installer/requirements.in
 torchdiffeq==0.2.3 \
     --hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
     --hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a
@@ -1867,6 +1890,10 @@ torchmetrics==0.10.2 \
     --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
     --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
     # via pytorch-lightning
+torchsde==0.2.5 \
+    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
+    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
+    # via k-diffusion
 torchvision==0.13.1 ; platform_system == "Darwin" \
     --hash=sha256:0298bae3b09ac361866088434008d82b99d6458fe8888c8df90720ef4b347d44 \
     --hash=sha256:08f592ea61836ebeceb5c97f4d7a813b9d7dc651bbf7ce4401563ccfae6a21fc \
@@ -1888,7 +1915,7 @@ torchvision==0.13.1 ; platform_system == "Darwin" \
     --hash=sha256:ef5fe3ec1848123cd0ec74c07658192b3147dcd38e507308c790d5943e87b88c \
     --hash=sha256:f230a1a40ed70d51e463ce43df243ec520902f8725de2502e485efc5eea9d864
     # via
-    #   -r requirements.in
+    #   -r installer/requirements.in
     #   basicsr
     #   clean-fid
     #   clip
@@ -1927,10 +1954,13 @@ tqdm==4.64.1 \
     #   taming-transformers-rom1504
     #   torch-fidelity
     #   transformers
+trampoline==0.1.2 \
+    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
+    # via torchsde
 transformers==4.24.0 \
     --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
     --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
-    # via -r requirements.in
+    # via -r installer/requirements.in
 typing-extensions==4.4.0 \
     --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
     --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
@@ -1944,9 +1974,7 @@ typing-extensions==4.4.0 \
 tzdata==2022.6 \
     --hash=sha256:04a680bdc5b15750c39c12a448885a51134a27ec9af83667663f0b3a1bf3f342 \
     --hash=sha256:91f11db4503385928c15598c98573e3af07e7229181bee5375bd30f1695ddcae
-    # via
-    #   pytz-deprecation-shim
-    #   tzlocal
+    # via pytz-deprecation-shim
 tzlocal==4.2 \
     --hash=sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745 \
     --hash=sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7
@@ -2,7 +2,7 @@
 # This file is autogenerated by pip-compile with python 3.10
 # To update, run:
 #
-# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-darwin-x86_64-cpu-reqs.txt requirements.in
+# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-darwin-x86_64-cpu-reqs.txt installer/requirements.in
 #
 --extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https
@@ -13,10 +13,12 @@ absl-py==1.3.0 \
     # via
     #   tb-nightly
     #   tensorboard
-accelerate==0.13.2 \
-    --hash=sha256:dd6f08b010077f252dda5a7699d87b02885335c456770939c536e65ff07ed760 \
-    --hash=sha256:e22180d7094e4c1bfb05a2b078297c222f6b4fa595fde8916946c3f377cdf019
-    # via k-diffusion
+accelerate==0.14.0 \
+    --hash=sha256:31c5bcc40564ef849b5bc1c4424a43ccaf9e26413b7df89c2e36bf81f070fd44 \
+    --hash=sha256:b15d562c0889d0cf441b01faa025dfc29b163d061b6cc7d489c2c83b0a55ffab
+    # via
+    #   -r installer/requirements.in
+    #   k-diffusion
 addict==2.4.0 \
     --hash=sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc \
     --hash=sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494
@@ -117,7 +119,7 @@ aiosignal==1.2.0 \
 albumentations==1.3.0 \
     --hash=sha256:294165d87d03bc8323e484927f0a5c1a3c64b0e7b9c32a979582a6c93c363bdf \
     --hash=sha256:be1af36832c8893314f2a5550e8ac19801e04770734c1b70fa3c996b41f37bed
-    # via -r requirements.in
+    # via -r installer/requirements.in
 altair==4.2.0 \
     --hash=sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a \
     --hash=sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026
@ -180,13 +182,12 @@ click==8.1.3 \
|
|||||||
# wandb
|
# wandb
|
||||||
clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
|
clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
|
||||||
--hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
|
--hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
|
||||||
-    # via -r requirements.in
-colorama==0.4.6 \
-    --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
-    --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
-    # via
-    #   click
-    #   tqdm
+    # via
+    #   -r installer/requirements.in
+    #   clipseg
+clipseg @ https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip \
+    --hash=sha256:14f43ed42f90be3fe57f06de483cb8be0f67f87a6f62a011339d45a39f4b4189
+    # via -r installer/requirements.in
commonmark==0.9.1 \
|
commonmark==0.9.1 \
|
||||||
--hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
|
--hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
|
||||||
--hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9
|
--hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9
|
||||||
@ -273,7 +274,7 @@ decorator==5.1.1 \
|
|||||||
diffusers==0.7.2 \
|
diffusers==0.7.2 \
|
||||||
--hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
|
--hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
|
||||||
--hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
|
--hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
dnspython==2.2.1 \
|
dnspython==2.2.1 \
|
||||||
--hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
|
--hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
|
||||||
--hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f
|
--hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f
|
||||||
@ -293,7 +294,7 @@ entrypoints==0.4 \
|
|||||||
eventlet==0.33.1 \
|
eventlet==0.33.1 \
|
||||||
--hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
|
--hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
|
||||||
--hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
|
--hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
facexlib==0.2.5 \
|
facexlib==0.2.5 \
|
||||||
--hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
|
--hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
|
||||||
--hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d
|
--hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d
|
||||||
@ -319,15 +320,15 @@ flask==2.2.2 \
|
|||||||
flask-cors==3.0.10 \
|
flask-cors==3.0.10 \
|
||||||
--hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
|
--hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
|
||||||
--hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
|
--hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
flask-socketio==5.3.1 \
|
flask-socketio==5.3.1 \
|
||||||
--hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
|
--hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
|
||||||
--hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
|
--hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
flaskwebgui==0.3.7 \
|
flaskwebgui==0.3.7 \
|
||||||
--hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
|
--hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
|
||||||
--hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
|
--hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
fonttools==4.38.0 \
|
fonttools==4.38.0 \
|
||||||
--hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
|
--hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
|
||||||
--hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb
|
--hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb
|
||||||
@ -411,11 +412,11 @@ future==0.18.2 \
|
|||||||
getpass-asterisk==1.0.1 \
|
getpass-asterisk==1.0.1 \
|
||||||
--hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
|
--hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
|
||||||
--hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
|
--hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
|
gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
|
||||||
--hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
|
--hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
|
||||||
# via
|
# via
|
||||||
-    #   -r requirements.in
+    #   -r installer/requirements.in
# realesrgan
|
# realesrgan
|
||||||
gitdb==4.0.9 \
|
gitdb==4.0.9 \
|
||||||
--hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \
|
--hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \
|
||||||
@ -576,7 +577,7 @@ imageio-ffmpeg==0.4.7 \
|
|||||||
--hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
|
--hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
|
||||||
--hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
|
--hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
|
||||||
--hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
|
--hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
importlib-metadata==5.0.0 \
|
importlib-metadata==5.0.0 \
|
||||||
--hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
|
--hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
|
||||||
--hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43
|
--hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43
|
||||||
@ -609,7 +610,7 @@ jsonschema==4.17.0 \
|
|||||||
# jsonmerge
|
# jsonmerge
|
||||||
k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
|
k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
|
||||||
--hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
|
--hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
kiwisolver==1.4.4 \
|
kiwisolver==1.4.4 \
|
||||||
--hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
|
--hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
|
||||||
--hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
|
--hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
|
||||||
@ -837,7 +838,9 @@ matplotlib==3.6.2 \
|
|||||||
--hash=sha256:ec9be0f4826cdb3a3a517509dcc5f87f370251b76362051ab59e42b6b765f8c4 \
|
--hash=sha256:ec9be0f4826cdb3a3a517509dcc5f87f370251b76362051ab59e42b6b765f8c4 \
|
||||||
--hash=sha256:f04f97797df35e442ed09f529ad1235d1f1c0f30878e2fe09a2676b71a8801e0 \
|
--hash=sha256:f04f97797df35e442ed09f529ad1235d1f1c0f30878e2fe09a2676b71a8801e0 \
|
||||||
--hash=sha256:f41e57ad63d336fe50d3a67bb8eaa26c09f6dda6a59f76777a99b8ccd8e26aec
|
--hash=sha256:f41e57ad63d336fe50d3a67bb8eaa26c09f6dda6a59f76777a99b8ccd8e26aec
|
||||||
-    # via filterpy
+    # via
+    #   clipseg
+    #   filterpy
multidict==6.0.2 \
|
multidict==6.0.2 \
|
||||||
--hash=sha256:0327292e745a880459ef71be14e709aaea2f783f3537588fb4ed09b6c01bca60 \
|
--hash=sha256:0327292e745a880459ef71be14e709aaea2f783f3537588fb4ed09b6c01bca60 \
|
||||||
--hash=sha256:041b81a5f6b38244b34dc18c7b6aba91f9cdaf854d9a39e5ff0b58e2b5773b9c \
|
--hash=sha256:041b81a5f6b38244b34dc18c7b6aba91f9cdaf854d9a39e5ff0b58e2b5773b9c \
|
||||||
@ -970,6 +973,7 @@ numpy==1.23.4 \
|
|||||||
# altair
|
# altair
|
||||||
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
+    #   clipseg
# contourpy
|
# contourpy
|
||||||
# diffusers
|
# diffusers
|
||||||
# facexlib
|
# facexlib
|
||||||
@ -1018,6 +1022,7 @@ opencv-python==4.6.0.66 \
|
|||||||
--hash=sha256:f482e78de6e7b0b060ff994ffd859bddc3f7f382bb2019ef157b0ea8ca8712f5
|
--hash=sha256:f482e78de6e7b0b060ff994ffd859bddc3f7f382bb2019ef157b0ea8ca8712f5
|
||||||
# via
|
# via
|
||||||
# basicsr
|
# basicsr
|
||||||
+    #   clipseg
# facexlib
|
# facexlib
|
||||||
# gfpgan
|
# gfpgan
|
||||||
# realesrgan
|
# realesrgan
|
||||||
@ -1080,6 +1085,10 @@ pandas==1.5.1 \
|
|||||||
pathtools==0.1.2 \
|
pathtools==0.1.2 \
|
||||||
--hash=sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0
|
--hash=sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0
|
||||||
# via wandb
|
# via wandb
|
||||||
+picklescan==0.0.5 \
+    --hash=sha256:368cf1b9a075bc1b6460ad82b694f260532b836c82f99d13846cd36e1bbe7f9a \
+    --hash=sha256:57153eca04d5df5009f2cdd595aef261b8a6f27e03046a1c84f672aa6869c592
+    # via -r installer/requirements.in
pillow==9.3.0 \
|
pillow==9.3.0 \
|
||||||
--hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
|
--hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
|
||||||
--hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
|
--hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
|
||||||
@ -1288,7 +1297,7 @@ pyparsing==3.0.9 \
|
|||||||
pyreadline3==3.4.1 \
|
pyreadline3==3.4.1 \
|
||||||
--hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
|
--hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
|
||||||
--hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
|
--hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
pyrsistent==0.19.2 \
|
pyrsistent==0.19.2 \
|
||||||
--hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
|
--hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
|
||||||
--hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \
|
--hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \
|
||||||
@ -1425,7 +1434,7 @@ qudida==0.0.4 \
|
|||||||
realesrgan==0.3.0 \
|
realesrgan==0.3.0 \
|
||||||
--hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
|
--hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
|
||||||
--hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
|
--hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
regex==2022.10.31 \
|
regex==2022.10.31 \
|
||||||
--hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
|
--hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
|
||||||
--hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \
|
--hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \
|
||||||
@ -1631,6 +1640,7 @@ scipy==1.9.3 \
|
|||||||
# albumentations
|
# albumentations
|
||||||
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
+    #   clipseg
# facexlib
|
# facexlib
|
||||||
# filterpy
|
# filterpy
|
||||||
# gfpgan
|
# gfpgan
|
||||||
@ -1646,7 +1656,7 @@ semver==2.13.0 \
|
|||||||
send2trash==1.8.0 \
|
send2trash==1.8.0 \
|
||||||
--hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
|
--hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
|
||||||
--hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
|
--hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
sentry-sdk==1.10.1 \
|
sentry-sdk==1.10.1 \
|
||||||
--hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
|
--hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
|
||||||
--hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691
|
--hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691
|
||||||
@ -1737,11 +1747,11 @@ smmap==5.0.0 \
|
|||||||
streamlit==1.14.0 \
|
streamlit==1.14.0 \
|
||||||
--hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
|
--hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
|
||||||
--hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
|
--hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
taming-transformers-rom1504==0.0.6 \
|
taming-transformers-rom1504==0.0.6 \
|
||||||
--hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
|
--hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
|
||||||
--hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
|
--hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
tb-nightly==2.11.0a20221106 \
|
tb-nightly==2.11.0a20221106 \
|
||||||
--hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
|
--hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
|
||||||
# via
|
# via
|
||||||
@ -1766,7 +1776,7 @@ tensorboard-plugin-wit==1.8.1 \
|
|||||||
# tensorboard
|
# tensorboard
|
||||||
test-tube==0.7.5 \
|
test-tube==0.7.5 \
|
||||||
--hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
|
--hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
threadpoolctl==3.1.0 \
|
threadpoolctl==3.1.0 \
|
||||||
--hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
|
--hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
|
||||||
--hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380
|
--hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380
|
||||||
@ -1816,29 +1826,29 @@ toolz==0.12.0 \
|
|||||||
--hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
|
--hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
|
||||||
--hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
|
--hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
|
||||||
# via altair
|
# via altair
|
||||||
-torch==1.13.0 \
+torch==1.12.0 ; platform_system == "Darwin" \
--hash=sha256:0fdd38c96230947b1ed870fed4a560252f8d23c3a2bf4dab9d2d42b18f2e67c8 \
|
--hash=sha256:0399746f83b4541bcb5b219a18dbe8cade760aba1c660d2748a38c6dc338ebc7 \
|
||||||
--hash=sha256:220325d0f4e69ee9edf00c04208244ef7cf22ebce083815ce272c7491f0603f5 \
|
--hash=sha256:0986685f2ec8b7c4d3593e8cfe96be85d462943f1a8f54112fc48d4d9fbbe903 \
|
||||||
--hash=sha256:43db0723fc66ad6486f86dc4890c497937f7cd27429f28f73fb7e4d74b7482e2 \
|
--hash=sha256:13c7cca6b2ea3704d775444f02af53c5f072d145247e17b8cd7813ac57869f03 \
|
||||||
--hash=sha256:47fe6228386bff6d74319a2ffe9d4ed943e6e85473d78e80502518c607d644d2 \
|
--hash=sha256:201abf43a99bb4980cc827dd4b38ac28f35e4dddac7832718be3d5479cafd2c1 \
|
||||||
--hash=sha256:49a949b8136b32b2ec0724cbf4c6678b54e974b7d68f19f1231eea21cde5c23b \
|
--hash=sha256:2143d5fe192fd908b70b494349de5b1ac02854a8a902bd5f47d13d85b410e430 \
|
||||||
--hash=sha256:4a378f5091307381abfb30eb821174e12986f39b1cf7c4522bf99155256819eb \
|
--hash=sha256:2568f011dddeb5990d8698cc375d237f14568ffa8489854e3b94113b4b6b7c8b \
|
||||||
--hash=sha256:635dbb99d981a6483ca533b3dc7be18ef08dd9e1e96fb0bb0e6a99d79e85a130 \
|
--hash=sha256:3322d33a06e440d715bb214334bd41314c94632d9a2f07d22006bf21da3a2be4 \
|
||||||
--hash=sha256:6c227c16626e4ce766cca5351cc62a2358a11e8e466410a298487b9dff159eb1 \
|
--hash=sha256:349ea3ba0c0e789e0507876c023181f13b35307aebc2e771efd0e045b8e03e84 \
|
||||||
--hash=sha256:857c7d5b1624c5fd979f66d2b074765733dba3f5e1cc97b7d6909155a2aae3ce \
|
--hash=sha256:44a3804e9bb189574f5d02ccc2dc6e32e26a81b3e095463b7067b786048c6072 \
|
||||||
--hash=sha256:9197ec216833b836b67e4d68e513d31fb38d9789d7cd998a08fba5b499c38454 \
|
--hash=sha256:5ed69d5af232c5c3287d44cef998880dadcc9721cd020e9ae02f42e56b79c2e4 \
|
||||||
--hash=sha256:922a4910613b310fbeb87707f00cb76fec328eb60cc1349ed2173e7c9b6edcd8 \
|
--hash=sha256:60d06ee2abfa85f10582d205404d52889d69bcbb71f7e211cfc37e3957ac19ca \
|
||||||
--hash=sha256:9ac382cedaf2f70afea41380ad8e7c06acef6b5b7e2aef3971cdad666ca6e185 \
|
--hash=sha256:63341f96840a223f277e498d2737b39da30d9f57c7a1ef88857b920096317739 \
|
||||||
--hash=sha256:bb33a911460475d1594a8c8cb73f58c08293211760796d99cae8c2509b86d7f1 \
|
--hash=sha256:72207b8733523388c49d43ffcc4416d1d8cd64c40f7826332e714605ace9b1d2 \
|
||||||
--hash=sha256:cd1e67db6575e1b173a626077a54e4911133178557aac50683db03a34e2b636a \
|
--hash=sha256:7ddb167827170c4e3ff6a27157414a00b9fef93dea175da04caf92a0619b7aee \
|
||||||
--hash=sha256:d2d2753519415d154de4d3e64d2eaaeefdba6b6fd7d69d5ffaef595988117700 \
|
--hash=sha256:844f1db41173b53fe40c44b3e04fcca23a6ce00ac328b7099f2800e611766845 \
|
||||||
--hash=sha256:e20df14d874b024851c58e8bb3846249cb120e677f7463f60c986e3661f88680 \
|
--hash=sha256:a1325c9c28823af497cbf443369bddac9ac59f67f1e600f8ab9b754958e55b76 \
|
||||||
--hash=sha256:e643ac8d086706e82f77b5d4dfcf145a9dd37b69e03e64177fc23821754d2ed7 \
|
--hash=sha256:abbdc5483359b9495dc76e3bd7911ccd2ddc57706c117f8316832e31590af871 \
|
||||||
--hash=sha256:ef934a21da6f6a516d0a9c712a80d09c56128abdc6af8dc151bee5199b4c3b4e \
|
--hash=sha256:c0313438bc36448ffd209f5fb4e5f325b3af158cdf61c8829b8ddaf128c57816 \
|
||||||
--hash=sha256:f01a9ae0d4b69d2fc4145e8beab45b7877342dddbd4838a7d3c11ca7f6680745 \
|
--hash=sha256:e3e8348edca3e3cee5a67a2b452b85c57712efe1cc3ffdb87c128b3dde54534e \
|
||||||
--hash=sha256:f68edfea71ade3862039ba66bcedf954190a2db03b0c41a9b79afd72210abd97 \
|
--hash=sha256:fb47291596677570246d723ee6abbcbac07eeba89d8f83de31e3954f21f44879
|
||||||
--hash=sha256:fa768432ce4b8ffa29184c79a3376ab3de4a57b302cdf3c026a6be4c5a8ab75b
|
|
||||||
# via
|
# via
|
||||||
+    #   -r installer/requirements.in
# accelerate
|
# accelerate
|
||||||
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
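The macOS lock above now pins torch behind a PEP 508 environment marker, so the requirement is only applied when pip runs on Darwin and is skipped on other platforms. As a hedged illustration, the marker form used throughout these files is:

    torch==1.12.0 ; platform_system == "Darwin"

with the --hash options shown above attached to the same logical line via the trailing backslashes.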
@ -1858,7 +1868,7 @@ torch==1.13.0 \
|
|||||||
torch-fidelity==0.3.0 \
|
torch-fidelity==0.3.0 \
|
||||||
--hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
|
--hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
|
||||||
--hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
|
--hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
torchdiffeq==0.2.3 \
|
torchdiffeq==0.2.3 \
|
||||||
--hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
|
--hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
|
||||||
--hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a
|
--hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a
|
||||||
@ -1867,28 +1877,28 @@ torchmetrics==0.10.2 \
|
|||||||
--hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
|
--hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
|
||||||
--hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
|
--hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
|
||||||
# via pytorch-lightning
|
# via pytorch-lightning
|
||||||
-torchvision==0.13.1 ; platform_system == "Darwin" \
+torchvision==0.13.0 ; platform_system == "Darwin" \
--hash=sha256:0298bae3b09ac361866088434008d82b99d6458fe8888c8df90720ef4b347d44 \
|
--hash=sha256:01e9e7b2e7724e66561e8d98f900985d80191e977c5c0b3f33ed31800ba0210c \
|
||||||
--hash=sha256:08f592ea61836ebeceb5c97f4d7a813b9d7dc651bbf7ce4401563ccfae6a21fc \
|
--hash=sha256:0e28740bd5695076f7c449af650fc474d6566722d446461c2ceebf9c9599b37f \
|
||||||
--hash=sha256:099874088df104d54d8008f2a28539ca0117b512daed8bf3c2bbfa2b7ccb187a \
|
--hash=sha256:1b703701f0b99f307ad925b1abda2b3d5bdbf30643ff02102b6aeeb8840ae278 \
|
||||||
--hash=sha256:0e77706cc90462653620e336bb90daf03d7bf1b88c3a9a3037df8d111823a56e \
|
--hash=sha256:1e2049f1207631d42d743205f663f1d2235796565be3f18b0339d479626faf30 \
|
||||||
--hash=sha256:19286a733c69dcbd417b86793df807bd227db5786ed787c17297741a9b0d0fc7 \
|
--hash=sha256:253eb0c67bf88cef4a79ec69058c3e94f9fde28b9e3699ad1afc0b3ed50f8075 \
|
||||||
--hash=sha256:3567fb3def829229ec217c1e38f08c5128ff7fb65854cac17ebac358ff7aa309 \
|
--hash=sha256:42d95ab197d090efc5669fec02fbc603d05c859e50ca2c60180d1a113aa9b3e2 \
|
||||||
--hash=sha256:4d8bf321c4380854ef04613935fdd415dce29d1088a7ff99e06e113f0efe9203 \
|
--hash=sha256:5c31e9b3004142dbfdf32adc4cf2d4fd709b820833e9786f839ae3a91ff65ef0 \
|
||||||
--hash=sha256:5e631241bee3661de64f83616656224af2e3512eb2580da7c08e08b8c965a8ac \
|
--hash=sha256:61d5093a50b7923a4e5bf9e0271001c29e01abec2348b7dd93370a0a9d15836c \
|
||||||
--hash=sha256:7552e80fa222252b8b217a951c85e172a710ea4cad0ae0c06fbb67addece7871 \
|
--hash=sha256:667cac55afb13cda7d362466e7eba3119e529b210e55507d231bead09aca5e1f \
|
||||||
--hash=sha256:7cb789ceefe6dcd0dc8eeda37bfc45efb7cf34770eac9533861d51ca508eb5b3 \
|
--hash=sha256:6c4c35428c758adc485ff8f239b5ed68c1b6c26efa261a52e431cab0f7f22aec \
|
||||||
--hash=sha256:83e9e2457f23110fd53b0177e1bc621518d6ea2108f570e853b768ce36b7c679 \
|
--hash=sha256:83a4d9d50787d1e886c94486b63b15978391f6cf1892fce6a93132c09b14e128 \
|
||||||
--hash=sha256:87c137f343197769a51333076e66bfcd576301d2cd8614b06657187c71b06c4f \
|
--hash=sha256:a20662c11dc14fd4eff102ceb946a7ee80b9f98303bb52435cc903f2c4c1fe10 \
|
||||||
--hash=sha256:899eec0b9f3b99b96d6f85b9aa58c002db41c672437677b553015b9135b3be7e \
|
--hash=sha256:acb72a40e5dc0cd454d28514dbdd589a5057afd9bb5c785b87a54718b999bfa1 \
|
||||||
--hash=sha256:8e4d02e4d8a203e0c09c10dfb478214c224d080d31efc0dbf36d9c4051f7f3c6 \
|
--hash=sha256:ad458146aca15f652f9b0c227bebd5403602c7341f15f68f20ec119fa8e8f4a5 \
|
||||||
--hash=sha256:b167934a5943242da7b1e59318f911d2d253feeca0d13ad5d832b58eed943401 \
|
--hash=sha256:ada295dbfe55017b02acfab960a997387f5addbadd28ee5e575e24f692992ce4 \
|
||||||
--hash=sha256:c5ed609c8bc88c575226400b2232e0309094477c82af38952e0373edef0003fd \
|
--hash=sha256:b620a43df4131ad09f5761c415a016a9ea95aaf8ec8c91d030fb59bad591094a \
|
||||||
--hash=sha256:e9a563894f9fa40692e24d1aa58c3ef040450017cfed3598ff9637f404f3fe3b \
|
--hash=sha256:b7a2c9aebc7ef265777fe7e82577364288d98cf6b8cf0a63bb2621df78a7af1a \
|
||||||
--hash=sha256:ef5fe3ec1848123cd0ec74c07658192b3147dcd38e507308c790d5943e87b88c \
|
--hash=sha256:c2278a189663087bb8e65915062aa7a25b8f8e5a3cfaa5879fe277e23e4bbf40 \
|
||||||
--hash=sha256:f230a1a40ed70d51e463ce43df243ec520902f8725de2502e485efc5eea9d864
|
--hash=sha256:df16abf31e7a5fce8db1f781bf1e4f20c8bc730c7c3f657e946cc5820c04e465
|
||||||
# via
|
# via
|
||||||
-    #   -r requirements.in
+    #   -r installer/requirements.in
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
# clip
|
# clip
|
||||||
@ -1930,7 +1940,7 @@ tqdm==4.64.1 \
|
|||||||
transformers==4.24.0 \
|
transformers==4.24.0 \
|
||||||
--hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
|
--hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
|
||||||
--hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
|
--hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
|
||||||
-    # via -r requirements.in
+    # via -r installer/requirements.in
typing-extensions==4.4.0 \
|
typing-extensions==4.4.0 \
|
||||||
--hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
|
--hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
|
||||||
--hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
|
--hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
|
||||||
@ -1944,9 +1954,7 @@ typing-extensions==4.4.0 \
|
|||||||
tzdata==2022.6 \
|
tzdata==2022.6 \
|
||||||
--hash=sha256:04a680bdc5b15750c39c12a448885a51134a27ec9af83667663f0b3a1bf3f342 \
|
--hash=sha256:04a680bdc5b15750c39c12a448885a51134a27ec9af83667663f0b3a1bf3f342 \
|
||||||
--hash=sha256:91f11db4503385928c15598c98573e3af07e7229181bee5375bd30f1695ddcae
|
--hash=sha256:91f11db4503385928c15598c98573e3af07e7229181bee5375bd30f1695ddcae
|
||||||
-    # via
-    #   pytz-deprecation-shim
-    #   tzlocal
+    # via pytz-deprecation-shim
tzlocal==4.2 \
|
tzlocal==4.2 \
|
||||||
--hash=sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745 \
|
--hash=sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745 \
|
||||||
--hash=sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7
|
--hash=sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7
|
@@ -1,9 +1,10 @@
 #
-# This file is autogenerated by pip-compile with python 3.10
-# To update, run:
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
 #
-# pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-linux-x86_64-cuda-reqs.txt requirements.in
+# pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/py3.10-linux-x86_64-cuda-reqs.txt binary_installer/requirements.in
 #
+--extra-index-url https://download.pytorch.org/whl/torch_stable.html
 --extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https
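Lock files like this one are intended to be installed with hash checking against the sha256 pins that follow. A minimal sketch of consuming it (file name taken from the header above; --require-hashes is a standard pip option):

    pip install --require-hashes -r binary_installer/py3.10-linux-x86_64-cuda-reqs.txt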
@ -13,10 +14,12 @@ absl-py==1.3.0 \
|
|||||||
# via
|
# via
|
||||||
# tb-nightly
|
# tb-nightly
|
||||||
# tensorboard
|
# tensorboard
|
||||||
-accelerate==0.13.2 \
-    --hash=sha256:dd6f08b010077f252dda5a7699d87b02885335c456770939c536e65ff07ed760 \
-    --hash=sha256:e22180d7094e4c1bfb05a2b078297c222f6b4fa595fde8916946c3f377cdf019
-    # via k-diffusion
+accelerate==0.14.0 \
+    --hash=sha256:31c5bcc40564ef849b5bc1c4424a43ccaf9e26413b7df89c2e36bf81f070fd44 \
+    --hash=sha256:b15d562c0889d0cf441b01faa025dfc29b163d061b6cc7d489c2c83b0a55ffab
+    # via
+    #   -r binary_installer/requirements.in
+    #   k-diffusion
addict==2.4.0 \
|
addict==2.4.0 \
|
||||||
--hash=sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc \
|
--hash=sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc \
|
||||||
--hash=sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494
|
--hash=sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494
|
||||||
@ -117,7 +120,7 @@ aiosignal==1.2.0 \
|
|||||||
albumentations==1.3.0 \
|
albumentations==1.3.0 \
|
||||||
--hash=sha256:294165d87d03bc8323e484927f0a5c1a3c64b0e7b9c32a979582a6c93c363bdf \
|
--hash=sha256:294165d87d03bc8323e484927f0a5c1a3c64b0e7b9c32a979582a6c93c363bdf \
|
||||||
--hash=sha256:be1af36832c8893314f2a5550e8ac19801e04770734c1b70fa3c996b41f37bed
|
--hash=sha256:be1af36832c8893314f2a5550e8ac19801e04770734c1b70fa3c996b41f37bed
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
altair==4.2.0 \
|
altair==4.2.0 \
|
||||||
--hash=sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a \
|
--hash=sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a \
|
||||||
--hash=sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026
|
--hash=sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026
|
||||||
@ -148,6 +151,10 @@ blinker==1.5 \
|
|||||||
--hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
|
--hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
|
||||||
--hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
|
--hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
|
||||||
# via streamlit
|
# via streamlit
|
||||||
+boltons==21.0.0 \
+    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
+    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
+    # via torchsde
cachetools==5.2.0 \
|
cachetools==5.2.0 \
|
||||||
--hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
|
--hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
|
||||||
--hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
|
--hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
|
||||||
@ -180,7 +187,12 @@ click==8.1.3 \
|
|||||||
# wandb
|
# wandb
|
||||||
clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
|
clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
|
||||||
--hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
|
--hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
|
||||||
-    # via -r requirements.in
+    # via
+    #   -r binary_installer/requirements.in
+    #   clipseg
+clipseg @ https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip \
+    --hash=sha256:14f43ed42f90be3fe57f06de483cb8be0f67f87a6f62a011339d45a39f4b4189
+    # via -r binary_installer/requirements.in
commonmark==0.9.1 \
|
commonmark==0.9.1 \
|
||||||
--hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
|
--hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
|
||||||
--hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9
|
--hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9
|
||||||
@ -267,7 +279,7 @@ decorator==5.1.1 \
|
|||||||
diffusers==0.7.2 \
|
diffusers==0.7.2 \
|
||||||
--hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
|
--hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
|
||||||
--hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
|
--hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
dnspython==2.2.1 \
|
dnspython==2.2.1 \
|
||||||
--hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
|
--hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
|
||||||
--hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f
|
--hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f
|
||||||
@ -287,7 +299,7 @@ entrypoints==0.4 \
|
|||||||
eventlet==0.33.1 \
|
eventlet==0.33.1 \
|
||||||
--hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
|
--hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
|
||||||
--hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
|
--hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
facexlib==0.2.5 \
|
facexlib==0.2.5 \
|
||||||
--hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
|
--hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
|
||||||
--hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d
|
--hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d
|
||||||
@ -313,15 +325,15 @@ flask==2.2.2 \
|
|||||||
flask-cors==3.0.10 \
|
flask-cors==3.0.10 \
|
||||||
--hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
|
--hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
|
||||||
--hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
|
--hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
flask-socketio==5.3.1 \
|
flask-socketio==5.3.1 \
|
||||||
--hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
|
--hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
|
||||||
--hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
|
--hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
flaskwebgui==0.3.7 \
|
flaskwebgui==0.3.7 \
|
||||||
--hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
|
--hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
|
||||||
--hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
|
--hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
fonttools==4.38.0 \
|
fonttools==4.38.0 \
|
||||||
--hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
|
--hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
|
||||||
--hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb
|
--hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb
|
||||||
@ -405,11 +417,11 @@ future==0.18.2 \
|
|||||||
getpass-asterisk==1.0.1 \
|
getpass-asterisk==1.0.1 \
|
||||||
--hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
|
--hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
|
||||||
--hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
|
--hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
-gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
-    --hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
+gfpgan @ https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system == "Linux" or platform_system == "Darwin" \
+    --hash=sha256:4155907b8b7db3686324554df7007eedd245cdf8656c21da9d9a3f44bef2fcaa
     # via
-    #   -r requirements.in
+    #   -r binary_installer/requirements.in
# realesrgan
|
# realesrgan
|
||||||
gitdb==4.0.9 \
|
gitdb==4.0.9 \
|
||||||
--hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \
|
--hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \
|
||||||
@ -570,12 +582,14 @@ imageio-ffmpeg==0.4.7 \
|
|||||||
--hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
|
--hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
|
||||||
--hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
|
--hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
|
||||||
--hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
|
--hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
importlib-metadata==5.0.0 \
|
importlib-metadata==5.0.0 \
|
||||||
--hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
|
--hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
|
||||||
--hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43
|
--hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43
|
||||||
# via
|
# via
|
||||||
# diffusers
|
# diffusers
|
||||||
+    #   flask
+    #   markdown
# streamlit
|
# streamlit
|
||||||
itsdangerous==2.1.2 \
|
itsdangerous==2.1.2 \
|
||||||
--hash=sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44 \
|
--hash=sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44 \
|
||||||
@ -601,9 +615,9 @@ jsonschema==4.17.0 \
|
|||||||
# via
|
# via
|
||||||
# altair
|
# altair
|
||||||
# jsonmerge
|
# jsonmerge
|
||||||
-k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
-    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
-    # via -r requirements.in
+k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
+    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
+    # via -r binary_installer/requirements.in
kiwisolver==1.4.4 \
|
kiwisolver==1.4.4 \
|
||||||
--hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
|
--hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
|
||||||
--hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
|
--hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
|
||||||
@ -831,7 +845,9 @@ matplotlib==3.6.2 \
|
|||||||
--hash=sha256:ec9be0f4826cdb3a3a517509dcc5f87f370251b76362051ab59e42b6b765f8c4 \
|
--hash=sha256:ec9be0f4826cdb3a3a517509dcc5f87f370251b76362051ab59e42b6b765f8c4 \
|
||||||
--hash=sha256:f04f97797df35e442ed09f529ad1235d1f1c0f30878e2fe09a2676b71a8801e0 \
|
--hash=sha256:f04f97797df35e442ed09f529ad1235d1f1c0f30878e2fe09a2676b71a8801e0 \
|
||||||
--hash=sha256:f41e57ad63d336fe50d3a67bb8eaa26c09f6dda6a59f76777a99b8ccd8e26aec
|
--hash=sha256:f41e57ad63d336fe50d3a67bb8eaa26c09f6dda6a59f76777a99b8ccd8e26aec
|
||||||
-    # via filterpy
+    # via
+    #   clipseg
+    #   filterpy
multidict==6.0.2 \
|
multidict==6.0.2 \
|
||||||
--hash=sha256:0327292e745a880459ef71be14e709aaea2f783f3537588fb4ed09b6c01bca60 \
|
--hash=sha256:0327292e745a880459ef71be14e709aaea2f783f3537588fb4ed09b6c01bca60 \
|
||||||
--hash=sha256:041b81a5f6b38244b34dc18c7b6aba91f9cdaf854d9a39e5ff0b58e2b5773b9c \
|
--hash=sha256:041b81a5f6b38244b34dc18c7b6aba91f9cdaf854d9a39e5ff0b58e2b5773b9c \
|
||||||
@ -964,6 +980,7 @@ numpy==1.23.4 \
|
|||||||
# altair
|
# altair
|
||||||
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
+    #   clipseg
# contourpy
|
# contourpy
|
||||||
# diffusers
|
# diffusers
|
||||||
# facexlib
|
# facexlib
|
||||||
@ -977,6 +994,7 @@ numpy==1.23.4 \
|
|||||||
# pandas
|
# pandas
|
||||||
# pyarrow
|
# pyarrow
|
||||||
# pydeck
|
# pydeck
|
||||||
+    #   pypatchmatch
# pytorch-lightning
|
# pytorch-lightning
|
||||||
# pywavelets
|
# pywavelets
|
||||||
# qudida
|
# qudida
|
||||||
@ -992,6 +1010,7 @@ numpy==1.23.4 \
|
|||||||
# tifffile
|
# tifffile
|
||||||
# torch-fidelity
|
# torch-fidelity
|
||||||
# torchmetrics
|
# torchmetrics
|
||||||
+    #   torchsde
# torchvision
|
# torchvision
|
||||||
# transformers
|
# transformers
|
||||||
oauthlib==3.2.2 \
|
oauthlib==3.2.2 \
|
||||||
@ -1012,6 +1031,7 @@ opencv-python==4.6.0.66 \
|
|||||||
--hash=sha256:f482e78de6e7b0b060ff994ffd859bddc3f7f382bb2019ef157b0ea8ca8712f5
|
--hash=sha256:f482e78de6e7b0b060ff994ffd859bddc3f7f382bb2019ef157b0ea8ca8712f5
|
||||||
# via
|
# via
|
||||||
# basicsr
|
# basicsr
|
||||||
+    #   clipseg
# facexlib
|
# facexlib
|
||||||
# gfpgan
|
# gfpgan
|
||||||
# realesrgan
|
# realesrgan
|
||||||
@ -1074,6 +1094,10 @@ pandas==1.5.1 \
|
|||||||
pathtools==0.1.2 \
|
pathtools==0.1.2 \
|
||||||
--hash=sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0
|
--hash=sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0
|
||||||
# via wandb
|
# via wandb
|
||||||
+picklescan==0.0.5 \
+    --hash=sha256:368cf1b9a075bc1b6460ad82b694f260532b836c82f99d13846cd36e1bbe7f9a \
+    --hash=sha256:57153eca04d5df5009f2cdd595aef261b8a6f27e03046a1c84f672aa6869c592
+    # via -r binary_installer/requirements.in
pillow==9.3.0 \
|
pillow==9.3.0 \
|
||||||
--hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
|
--hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
|
||||||
--hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
|
--hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
|
||||||
@ -1144,6 +1168,7 @@ pillow==9.3.0 \
|
|||||||
# imageio
|
# imageio
|
||||||
# k-diffusion
|
# k-diffusion
|
||||||
# matplotlib
|
# matplotlib
|
||||||
+    #   pypatchmatch
# realesrgan
|
# realesrgan
|
||||||
# scikit-image
|
# scikit-image
|
||||||
# streamlit
|
# streamlit
|
||||||
@ -1279,10 +1304,13 @@ pyparsing==3.0.9 \
|
|||||||
# via
|
# via
|
||||||
# matplotlib
|
# matplotlib
|
||||||
# packaging
|
# packaging
|
||||||
+pypatchmatch @ https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip \
+    --hash=sha256:4ad6ec95379e7d122d494ff76633cc7cf9b71330d5efda147fceba81e3dc6cd2
+    # via -r binary_installer/requirements.in
pyreadline3==3.4.1 \
|
pyreadline3==3.4.1 \
|
||||||
--hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
|
--hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
|
||||||
--hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
|
--hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
pyrsistent==0.19.2 \
|
pyrsistent==0.19.2 \
|
||||||
--hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
|
--hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
|
||||||
--hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \
|
--hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \
|
||||||
@ -1419,7 +1447,7 @@ qudida==0.0.4 \
|
|||||||
realesrgan==0.3.0 \
|
realesrgan==0.3.0 \
|
||||||
--hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
|
--hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
|
||||||
--hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
|
--hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
regex==2022.10.31 \
|
regex==2022.10.31 \
|
||||||
--hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
|
--hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
|
||||||
--hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \
|
--hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \
|
||||||
@ -1625,6 +1653,7 @@ scipy==1.9.3 \
|
|||||||
# albumentations
|
# albumentations
|
||||||
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
+    #   clipseg
# facexlib
|
# facexlib
|
||||||
# filterpy
|
# filterpy
|
||||||
# gfpgan
|
# gfpgan
|
||||||
@ -1633,6 +1662,7 @@ scipy==1.9.3 \
|
|||||||
# scikit-learn
|
# scikit-learn
|
||||||
# torch-fidelity
|
# torch-fidelity
|
||||||
# torchdiffeq
|
# torchdiffeq
|
||||||
+    #   torchsde
semver==2.13.0 \
|
semver==2.13.0 \
|
||||||
--hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
|
--hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
|
||||||
--hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f
|
--hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f
|
||||||
@ -1640,7 +1670,7 @@ semver==2.13.0 \
|
|||||||
send2trash==1.8.0 \
|
send2trash==1.8.0 \
|
||||||
--hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
|
--hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
|
||||||
--hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
|
--hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
sentry-sdk==1.10.1 \
|
sentry-sdk==1.10.1 \
|
||||||
--hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
|
--hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
|
||||||
--hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691
|
--hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691
|
||||||
@ -1731,11 +1761,11 @@ smmap==5.0.0 \
|
|||||||
streamlit==1.14.0 \
|
streamlit==1.14.0 \
|
||||||
--hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
|
--hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
|
||||||
--hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
|
--hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
taming-transformers-rom1504==0.0.6 \
|
taming-transformers-rom1504==0.0.6 \
|
||||||
--hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
|
--hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
|
||||||
--hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
|
--hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
tb-nightly==2.11.0a20221106 \
|
tb-nightly==2.11.0a20221106 \
|
||||||
--hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
|
--hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
|
||||||
# via
|
# via
|
||||||
@ -1760,7 +1790,7 @@ tensorboard-plugin-wit==1.8.1 \
|
|||||||
# tensorboard
|
# tensorboard
|
||||||
test-tube==0.7.5 \
|
test-tube==0.7.5 \
|
||||||
--hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
|
--hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
threadpoolctl==3.1.0 \
|
threadpoolctl==3.1.0 \
|
||||||
--hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
|
--hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
|
||||||
--hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380
|
--hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380
|
||||||
@ -1810,16 +1840,17 @@ toolz==0.12.0 \
|
|||||||
--hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
|
--hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
|
||||||
--hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
|
--hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
|
||||||
# via altair
|
# via altair
|
||||||
-torch==1.12.1+cu116 \
+torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
--hash=sha256:7725420dabebfcaf44984edce3283eea91f98f0f7d5874bc68c7a164bd8126e3 \
|
--hash=sha256:1d9557d1e871794a31a71c40dec8589d6c3347f3f2953a8dd74cfd58e1ecb52e \
|
||||||
--hash=sha256:832effad8b21109700323a5aa137a2e4bdea711dac3d8491ff542f798dab0101 \
|
--hash=sha256:72538e4505087668a4642f861578dfed470fae5da20b1758b0f34e4a070d6b21 \
|
||||||
--hash=sha256:84f031e4ee25d95368d7531aa58e79da9808d3fa53b4b363ea03a2450b6fd0af \
|
--hash=sha256:74f5b137190a6face6859d630f129289e7fae6a4d9a747430b3b5d5c6297a3ae \
|
||||||
--hash=sha256:b6bc31244aa2818929fbb30c483c221df471e9d856e805c5a1ff72b131ae9e7b \
|
--hash=sha256:7665e906995328746c6f70016ee90cafe50cbf434b6ef576e1de2678929ee63e \
|
||||||
--hash=sha256:b8e8906e770bcad12e67c269e1bcdd7661a8abd96519a4ba643e86440bbcc1bf \
|
--hash=sha256:7ee1899e9afe5f5e35ba46bc70e17735d2c02cedede1fa69a288cc680b5ab3db \
|
||||||
--hash=sha256:bca5a77071d7eb901beb775648b125e6d9279f231d1f23e56530b5a189df8975 \
|
--hash=sha256:97d63afcb6358071737f8325aa933e9db2f30cd2f068591d27d4ea72f3cabad2 \
|
||||||
--hash=sha256:dda312901220895087cc83d3665464a3dc171d04460c61c31af463efbfb54896 \
|
--hash=sha256:aa43d7b54b86f723f17c5c44df1078c59a6149fc4d42fbef08aafab9d61451c9 \
|
||||||
--hash=sha256:fc9b4786ec54be67eaa8b0c7c9999e2f4ae2b89a1c18e41de1515a190440c691
|
--hash=sha256:f772be831447dd01ebd26cbedf619e668d1b269d69bf6b4ff46b1378362bff26
|
||||||
# via
|
# via
|
||||||
+    #   -r binary_installer/requirements.in
# accelerate
|
# accelerate
|
||||||
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
@ -1835,11 +1866,12 @@ torch==1.12.1+cu116 \
|
|||||||
# torch-fidelity
|
# torch-fidelity
|
||||||
# torchdiffeq
|
# torchdiffeq
|
||||||
# torchmetrics
|
# torchmetrics
|
||||||
+    #   torchsde
# torchvision
|
# torchvision
|
||||||
torch-fidelity==0.3.0 \
|
torch-fidelity==0.3.0 \
|
||||||
--hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
|
--hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
|
||||||
--hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
|
--hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
torchdiffeq==0.2.3 \
|
torchdiffeq==0.2.3 \
|
||||||
--hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
|
--hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
|
||||||
--hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a
|
--hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a
|
||||||
@ -1848,17 +1880,21 @@ torchmetrics==0.10.2 \
|
|||||||
--hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
|
--hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
|
||||||
--hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
|
--hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
|
||||||
# via pytorch-lightning
|
# via pytorch-lightning
|
||||||
torchvision==0.13.1+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
|
torchsde==0.2.5 \
|
||||||
--hash=sha256:0c9a2b605ac30fcf475d60f79ba378af0073a22de585453f8c3dd6c1452ab9bc \
|
--hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
|
||||||
--hash=sha256:75986abe572138258eb9795cb4cd73f40b2bdf8374fefa1af6ff6bb0dbc972c6 \
|
--hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
|
||||||
--hash=sha256:8a4c395bb72cf51eb4318c6861c9a5ea490d48ec36a3d767220ef182445449cb \
|
# via k-diffusion
|
||||||
--hash=sha256:92e4685c6010b6b1c228ebb5fe93105d0a71e5b586483a942e04529a43e0bb42 \
|
torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
|
||||||
--hash=sha256:9ec5654c56a22fe420dc0af0ff5cd31105f583fdb0240043ff26a7cfed7e05fb \
|
--hash=sha256:1696feadf1921c8fa1549bad774221293298288ebedaa14e44bc3e57e964a369 \
|
||||||
--hash=sha256:ba8b7d3c33f63feb29c7dd8c0db68b735d0c9d924ff4e84121b4b20b17cec7a5 \
|
--hash=sha256:572544b108eaf12638f3dca0f496a453c4b8d8256bcc8333d5355df641c0380c \
|
||||||
--hash=sha256:c3ceb2b3f456f0c984af71ef55f8637f178a29dc3e13a66fbb010ceead2891e1 \
|
--hash=sha256:76dbe71be271e2f246d556a8201c6f73a431851045d866c51bd945521817b892 \
|
||||||
--hash=sha256:dcf32f6d998493e76ec21a38bbb856b7402295cf7a67fb09ce5bde7e7e725756
|
--hash=sha256:90b9461f57e1219ca900bfd9e85548b840ec56d57ec331b7a7eb871113b34c0a \
|
||||||
|
--hash=sha256:941a8c958f2fe9184ce522567f4a471b52dd306891870e979fe6569062432258 \
|
||||||
|
--hash=sha256:9ce27c87a8581d00dcef416ec75f8eca9c225d8c36b81150a1f2a60eb70155dc \
|
||||||
|
--hash=sha256:cb6bf0117b8f4b601baeae54e8a6bb5c4942b054835ba997f438ddcb7adcfb90 \
|
||||||
|
--hash=sha256:d1a3c124645e3460b3e50b54eb89a2575a5036bfa618f15dc4f5d635c716069d
|
||||||
# via
|
# via
|
||||||
-    #   -r requirements.in
+    #   -r binary_installer/requirements.in
# basicsr
|
# basicsr
|
||||||
# clean-fid
|
# clean-fid
|
||||||
# clip
|
# clip
|
||||||
@ -1897,10 +1933,13 @@ tqdm==4.64.1 \
|
|||||||
# taming-transformers-rom1504
|
# taming-transformers-rom1504
|
||||||
# torch-fidelity
|
# torch-fidelity
|
||||||
# transformers
|
# transformers
|
||||||
+trampoline==0.1.2 \
+    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
+    # via torchsde
transformers==4.24.0 \
|
transformers==4.24.0 \
|
||||||
--hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
|
--hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
|
||||||
--hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
|
--hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
|
||||||
-    # via -r requirements.in
+    # via -r binary_installer/requirements.in
typing-extensions==4.4.0 \
|
typing-extensions==4.4.0 \
|
||||||
--hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
|
--hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
|
||||||
--hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
|
--hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
|
||||||
@ -1914,9 +1953,7 @@ typing-extensions==4.4.0 \
|
|||||||
tzdata==2022.6 \
|
tzdata==2022.6 \
|
||||||
--hash=sha256:04a680bdc5b15750c39c12a448885a51134a27ec9af83667663f0b3a1bf3f342 \
|
--hash=sha256:04a680bdc5b15750c39c12a448885a51134a27ec9af83667663f0b3a1bf3f342 \
|
||||||
--hash=sha256:91f11db4503385928c15598c98573e3af07e7229181bee5375bd30f1695ddcae
|
--hash=sha256:91f11db4503385928c15598c98573e3af07e7229181bee5375bd30f1695ddcae
|
||||||
-    # via
-    #   pytz-deprecation-shim
-    #   tzlocal
+    # via pytz-deprecation-shim
tzlocal==4.2 \
|
tzlocal==4.2 \
|
||||||
--hash=sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745 \
|
--hash=sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745 \
|
||||||
--hash=sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7
|
--hash=sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7
|
installer/py3.10-windows-x86_64-cuda-reqs.txt
@@ -2,8 +2,9 @@
 # This file is autogenerated by pip-compile with python 3.10
 # To update, run:
 #
-#    pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-windows-x86_64-cuda-reqs.txt requirements.in
+#    pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-windows-x86_64-cuda-reqs.txt installer/requirements.in
 #
+--extra-index-url https://download.pytorch.org/whl/torch_stable.html
 --extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https
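Because this lock file was produced with --generate-hashes, every pin carries sha256 digests and pip runs in hash-checking mode, rejecting any downloaded artifact whose digest does not match a recorded value. A minimal Python sketch of that digest comparison; the file path and expected digest below are placeholders for illustration, not values taken from this repository:

    import hashlib

    def sha256_of(path: str) -> str:
        # Stream the file in 1 MiB chunks so large wheels need not fit in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest()

    if __name__ == "__main__":
        wheel_path = "downloads/example.whl"      # placeholder path
        pinned = "replace-with-a-pinned-digest"   # placeholder digest from a lock file
        if sha256_of(wheel_path) != pinned:
            raise SystemExit("hash mismatch: refusing to install")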
@@ -13,10 +14,12 @@ absl-py==1.3.0 \
     # via
     #   tb-nightly
     #   tensorboard
-accelerate==0.13.2 \
-    --hash=sha256:dd6f08b010077f252dda5a7699d87b02885335c456770939c536e65ff07ed760 \
-    --hash=sha256:e22180d7094e4c1bfb05a2b078297c222f6b4fa595fde8916946c3f377cdf019
-    # via k-diffusion
+accelerate==0.14.0 \
+    --hash=sha256:31c5bcc40564ef849b5bc1c4424a43ccaf9e26413b7df89c2e36bf81f070fd44 \
+    --hash=sha256:b15d562c0889d0cf441b01faa025dfc29b163d061b6cc7d489c2c83b0a55ffab
+    # via
+    #   -r installer/requirements.in
+    #   k-diffusion
 addict==2.4.0 \
     --hash=sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc \
     --hash=sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494
@@ -117,7 +120,7 @@ aiosignal==1.2.0 \
 albumentations==1.3.0 \
     --hash=sha256:294165d87d03bc8323e484927f0a5c1a3c64b0e7b9c32a979582a6c93c363bdf \
     --hash=sha256:be1af36832c8893314f2a5550e8ac19801e04770734c1b70fa3c996b41f37bed
-    # via -r requirements.in
+    # via -r installer/requirements.in
 altair==4.2.0 \
     --hash=sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a \
     --hash=sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026
@@ -148,6 +151,10 @@ blinker==1.5 \
     --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
     --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
     # via streamlit
+boltons==21.0.0 \
+    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
+    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
+    # via torchsde
 cachetools==5.2.0 \
     --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
     --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db
@@ -180,7 +187,12 @@ click==8.1.3 \
     #   wandb
 clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
     --hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
-    # via -r requirements.in
+    # via
+    #   -r installer/requirements.in
+    #   clipseg
+clipseg @ https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip \
+    --hash=sha256:14f43ed42f90be3fe57f06de483cb8be0f67f87a6f62a011339d45a39f4b4189
+    # via -r installer/requirements.in
 colorama==0.4.6 \
     --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
     --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
@@ -273,7 +285,7 @@ decorator==5.1.1 \
 diffusers==0.7.2 \
     --hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
     --hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
-    # via -r requirements.in
+    # via -r installer/requirements.in
 dnspython==2.2.1 \
     --hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
     --hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f
@@ -293,7 +305,7 @@ entrypoints==0.4 \
 eventlet==0.33.1 \
     --hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
     --hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
-    # via -r requirements.in
+    # via -r installer/requirements.in
 facexlib==0.2.5 \
     --hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
     --hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d
@@ -319,15 +331,15 @@ flask==2.2.2 \
 flask-cors==3.0.10 \
     --hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
     --hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
-    # via -r requirements.in
+    # via -r installer/requirements.in
 flask-socketio==5.3.1 \
     --hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
     --hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
-    # via -r requirements.in
+    # via -r installer/requirements.in
 flaskwebgui==0.3.7 \
     --hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
     --hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
-    # via -r requirements.in
+    # via -r installer/requirements.in
 fonttools==4.38.0 \
     --hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
     --hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb
@@ -411,11 +423,11 @@ future==0.18.2 \
 getpass-asterisk==1.0.1 \
     --hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
     --hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
-    # via -r requirements.in
+    # via -r installer/requirements.in
 gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
     --hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
     # via
-    #   -r requirements.in
+    #   -r installer/requirements.in
     #   realesrgan
 gitdb==4.0.9 \
     --hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \
@@ -576,7 +588,7 @@ imageio-ffmpeg==0.4.7 \
     --hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
     --hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
     --hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
-    # via -r requirements.in
+    # via -r installer/requirements.in
 importlib-metadata==5.0.0 \
     --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
     --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43
@@ -607,9 +619,9 @@ jsonschema==4.17.0 \
     # via
     #   altair
     #   jsonmerge
-k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
-    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
-    # via -r requirements.in
+k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
+    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
+    # via -r installer/requirements.in
 kiwisolver==1.4.4 \
     --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
     --hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
@@ -837,7 +849,9 @@ matplotlib==3.6.2 \
     --hash=sha256:ec9be0f4826cdb3a3a517509dcc5f87f370251b76362051ab59e42b6b765f8c4 \
     --hash=sha256:f04f97797df35e442ed09f529ad1235d1f1c0f30878e2fe09a2676b71a8801e0 \
     --hash=sha256:f41e57ad63d336fe50d3a67bb8eaa26c09f6dda6a59f76777a99b8ccd8e26aec
-    # via filterpy
+    # via
+    #   clipseg
+    #   filterpy
 multidict==6.0.2 \
     --hash=sha256:0327292e745a880459ef71be14e709aaea2f783f3537588fb4ed09b6c01bca60 \
     --hash=sha256:041b81a5f6b38244b34dc18c7b6aba91f9cdaf854d9a39e5ff0b58e2b5773b9c \
@@ -970,6 +984,7 @@ numpy==1.23.4 \
     #   altair
     #   basicsr
     #   clean-fid
+    #   clipseg
     #   contourpy
     #   diffusers
     #   facexlib
@@ -983,6 +998,7 @@ numpy==1.23.4 \
     #   pandas
     #   pyarrow
     #   pydeck
+    #   pypatchmatch
     #   pytorch-lightning
     #   pywavelets
     #   qudida
@@ -998,6 +1014,7 @@ numpy==1.23.4 \
     #   tifffile
     #   torch-fidelity
     #   torchmetrics
+    #   torchsde
     #   torchvision
     #   transformers
 oauthlib==3.2.2 \
@@ -1018,6 +1035,7 @@ opencv-python==4.6.0.66 \
     --hash=sha256:f482e78de6e7b0b060ff994ffd859bddc3f7f382bb2019ef157b0ea8ca8712f5
     # via
     #   basicsr
+    #   clipseg
     #   facexlib
     #   gfpgan
     #   realesrgan
@@ -1080,6 +1098,10 @@ pandas==1.5.1 \
 pathtools==0.1.2 \
     --hash=sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0
     # via wandb
+picklescan==0.0.5 \
+    --hash=sha256:368cf1b9a075bc1b6460ad82b694f260532b836c82f99d13846cd36e1bbe7f9a \
+    --hash=sha256:57153eca04d5df5009f2cdd595aef261b8a6f27e03046a1c84f672aa6869c592
+    # via -r installer/requirements.in
 pillow==9.3.0 \
     --hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
     --hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \
@@ -1150,6 +1172,7 @@ pillow==9.3.0 \
     #   imageio
     #   k-diffusion
     #   matplotlib
+    #   pypatchmatch
     #   realesrgan
     #   scikit-image
     #   streamlit
@@ -1285,10 +1308,13 @@ pyparsing==3.0.9 \
     # via
     #   matplotlib
     #   packaging
+pypatchmatch @ https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip \
+    --hash=sha256:4ad6ec95379e7d122d494ff76633cc7cf9b71330d5efda147fceba81e3dc6cd2
+    # via -r installer/requirements.in
 pyreadline3==3.4.1 \
     --hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
     --hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
-    # via -r requirements.in
+    # via -r installer/requirements.in
 pyrsistent==0.19.2 \
     --hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
     --hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \
@@ -1425,7 +1451,7 @@ qudida==0.0.4 \
 realesrgan==0.3.0 \
     --hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
     --hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
-    # via -r requirements.in
+    # via -r installer/requirements.in
 regex==2022.10.31 \
     --hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
     --hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \
@@ -1631,6 +1657,7 @@ scipy==1.9.3 \
     #   albumentations
     #   basicsr
     #   clean-fid
+    #   clipseg
     #   facexlib
     #   filterpy
     #   gfpgan
@@ -1639,6 +1666,7 @@ scipy==1.9.3 \
     #   scikit-learn
     #   torch-fidelity
     #   torchdiffeq
+    #   torchsde
 semver==2.13.0 \
     --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
     --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f
@@ -1646,7 +1674,7 @@ semver==2.13.0 \
 send2trash==1.8.0 \
     --hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
     --hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
-    # via -r requirements.in
+    # via -r installer/requirements.in
 sentry-sdk==1.10.1 \
     --hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
     --hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691
@@ -1737,11 +1765,11 @@ smmap==5.0.0 \
 streamlit==1.14.0 \
     --hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
     --hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
-    # via -r requirements.in
+    # via -r installer/requirements.in
 taming-transformers-rom1504==0.0.6 \
     --hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
     --hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
-    # via -r requirements.in
+    # via -r installer/requirements.in
 tb-nightly==2.11.0a20221106 \
     --hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
     # via
@@ -1766,7 +1794,7 @@ tensorboard-plugin-wit==1.8.1 \
     #   tensorboard
 test-tube==0.7.5 \
     --hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
-    # via -r requirements.in
+    # via -r installer/requirements.in
 threadpoolctl==3.1.0 \
     --hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
     --hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380
@@ -1816,16 +1844,17 @@ toolz==0.12.0 \
     --hash=sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f \
     --hash=sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194
     # via altair
-torch==1.12.1+cu116 \
-    --hash=sha256:7725420dabebfcaf44984edce3283eea91f98f0f7d5874bc68c7a164bd8126e3 \
-    --hash=sha256:832effad8b21109700323a5aa137a2e4bdea711dac3d8491ff542f798dab0101 \
-    --hash=sha256:84f031e4ee25d95368d7531aa58e79da9808d3fa53b4b363ea03a2450b6fd0af \
-    --hash=sha256:b6bc31244aa2818929fbb30c483c221df471e9d856e805c5a1ff72b131ae9e7b \
-    --hash=sha256:b8e8906e770bcad12e67c269e1bcdd7661a8abd96519a4ba643e86440bbcc1bf \
-    --hash=sha256:bca5a77071d7eb901beb775648b125e6d9279f231d1f23e56530b5a189df8975 \
-    --hash=sha256:dda312901220895087cc83d3665464a3dc171d04460c61c31af463efbfb54896 \
-    --hash=sha256:fc9b4786ec54be67eaa8b0c7c9999e2f4ae2b89a1c18e41de1515a190440c691
+torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
+    --hash=sha256:1d9557d1e871794a31a71c40dec8589d6c3347f3f2953a8dd74cfd58e1ecb52e \
+    --hash=sha256:72538e4505087668a4642f861578dfed470fae5da20b1758b0f34e4a070d6b21 \
+    --hash=sha256:74f5b137190a6face6859d630f129289e7fae6a4d9a747430b3b5d5c6297a3ae \
+    --hash=sha256:7665e906995328746c6f70016ee90cafe50cbf434b6ef576e1de2678929ee63e \
+    --hash=sha256:7ee1899e9afe5f5e35ba46bc70e17735d2c02cedede1fa69a288cc680b5ab3db \
+    --hash=sha256:97d63afcb6358071737f8325aa933e9db2f30cd2f068591d27d4ea72f3cabad2 \
+    --hash=sha256:aa43d7b54b86f723f17c5c44df1078c59a6149fc4d42fbef08aafab9d61451c9 \
+    --hash=sha256:f772be831447dd01ebd26cbedf619e668d1b269d69bf6b4ff46b1378362bff26
     # via
+    #   -r installer/requirements.in
     #   accelerate
     #   basicsr
     #   clean-fid
@@ -1841,11 +1870,12 @@ torch==1.12.1+cu116 \
     #   torch-fidelity
     #   torchdiffeq
     #   torchmetrics
+    #   torchsde
     #   torchvision
 torch-fidelity==0.3.0 \
     --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
     --hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
-    # via -r requirements.in
+    # via -r installer/requirements.in
 torchdiffeq==0.2.3 \
     --hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
     --hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a
@@ -1854,17 +1884,21 @@ torchmetrics==0.10.2 \
     --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
     --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
     # via pytorch-lightning
-torchvision==0.13.1+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
-    --hash=sha256:0c9a2b605ac30fcf475d60f79ba378af0073a22de585453f8c3dd6c1452ab9bc \
-    --hash=sha256:75986abe572138258eb9795cb4cd73f40b2bdf8374fefa1af6ff6bb0dbc972c6 \
-    --hash=sha256:8a4c395bb72cf51eb4318c6861c9a5ea490d48ec36a3d767220ef182445449cb \
-    --hash=sha256:92e4685c6010b6b1c228ebb5fe93105d0a71e5b586483a942e04529a43e0bb42 \
-    --hash=sha256:9ec5654c56a22fe420dc0af0ff5cd31105f583fdb0240043ff26a7cfed7e05fb \
-    --hash=sha256:ba8b7d3c33f63feb29c7dd8c0db68b735d0c9d924ff4e84121b4b20b17cec7a5 \
-    --hash=sha256:c3ceb2b3f456f0c984af71ef55f8637f178a29dc3e13a66fbb010ceead2891e1 \
-    --hash=sha256:dcf32f6d998493e76ec21a38bbb856b7402295cf7a67fb09ce5bde7e7e725756
+torchsde==0.2.5 \
+    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
+    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
+    # via k-diffusion
+torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
+    --hash=sha256:1696feadf1921c8fa1549bad774221293298288ebedaa14e44bc3e57e964a369 \
+    --hash=sha256:572544b108eaf12638f3dca0f496a453c4b8d8256bcc8333d5355df641c0380c \
+    --hash=sha256:76dbe71be271e2f246d556a8201c6f73a431851045d866c51bd945521817b892 \
+    --hash=sha256:90b9461f57e1219ca900bfd9e85548b840ec56d57ec331b7a7eb871113b34c0a \
+    --hash=sha256:941a8c958f2fe9184ce522567f4a471b52dd306891870e979fe6569062432258 \
+    --hash=sha256:9ce27c87a8581d00dcef416ec75f8eca9c225d8c36b81150a1f2a60eb70155dc \
+    --hash=sha256:cb6bf0117b8f4b601baeae54e8a6bb5c4942b054835ba997f438ddcb7adcfb90 \
+    --hash=sha256:d1a3c124645e3460b3e50b54eb89a2575a5036bfa618f15dc4f5d635c716069d
     # via
-    #   -r requirements.in
+    #   -r installer/requirements.in
     #   basicsr
     #   clean-fid
     #   clip
@@ -1903,10 +1937,13 @@ tqdm==4.64.1 \
     #   taming-transformers-rom1504
     #   torch-fidelity
     #   transformers
+trampoline==0.1.2 \
+    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
+    # via torchsde
 transformers==4.24.0 \
     --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
     --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
-    # via -r requirements.in
+    # via -r installer/requirements.in
 typing-extensions==4.4.0 \
     --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
     --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
binary_installer/readme.txt (new file, 17 lines)
@@ -0,0 +1,17 @@
+InvokeAI
+
+Project homepage: https://github.com/invoke-ai/InvokeAI
+
+Installation on Windows:
+NOTE: You might need to enable Windows Long Paths. If you're not sure,
+then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
+file. Note that you will need to have admin privileges in order to
+do this.
+
+Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
+
+Installation on Linux and Mac:
+Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
+
+After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
+file (on Linux/Mac) to start InvokeAI.
binary_installer/requirements.in (new file, 32 lines)
@@ -0,0 +1,32 @@
+--prefer-binary
+--extra-index-url https://download.pytorch.org/whl/torch_stable.html
+--extra-index-url https://download.pytorch.org/whl/cu116
+--trusted-host https://download.pytorch.org
+accelerate~=0.14
+albumentations
+diffusers
+eventlet
+flask_cors
+flask_socketio
+flaskwebgui==1.0.3
+getpass_asterisk
+imageio-ffmpeg
+pyreadline3
+realesrgan
+send2trash
+streamlit
+taming-transformers-rom1504
+test-tube
+torch-fidelity
+torch==1.12.1 ; platform_system == 'Darwin'
+torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
+torchvision==0.13.1 ; platform_system == 'Darwin'
+torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
+transformers
+picklescan
+https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
+https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
+https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
+https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
+https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
+https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
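The "; platform_system == ..." suffixes in the file above are PEP 508 environment markers; they are how a single requirements.in resolves to CUDA builds of torch and torchvision on Linux and Windows but plain builds on macOS. A minimal sketch of how such a line parses and evaluates, assuming the third-party packaging library is available (it is not itself one of the pins shown):

    from packaging.requirements import Requirement

    # One of the platform-conditional pins from binary_installer/requirements.in.
    req = Requirement(
        "torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'"
    )
    print(req.name)       # torch
    print(req.specifier)  # ==1.12.0+cu116
    # Evaluate against the running interpreter, then against an explicit environment.
    print(req.marker.evaluate())
    print(req.marker.evaluate({"platform_system": "Darwin"}))  # False: macOS falls back to torch==1.12.1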
configs/INITIAL_MODELS.yaml (new file, 80 lines)
@@ -0,0 +1,80 @@
+stable-diffusion-1.5:
+  description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
+  repo_id: runwayml/stable-diffusion-v1-5
+  config: v1-inference.yaml
+  file: v1-5-pruned-emaonly.ckpt
+  recommended: true
+  width: 512
+  height: 512
+inpainting-1.5:
+  description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
+  repo_id: runwayml/stable-diffusion-inpainting
+  config: v1-inpainting-inference.yaml
+  file: sd-v1-5-inpainting.ckpt
+  recommended: True
+  width: 512
+  height: 512
+ft-mse-improved-autoencoder-840000:
+  description: StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
+  repo_id: stabilityai/sd-vae-ft-mse-original
+  config: VAE/default
+  file: vae-ft-mse-840000-ema-pruned.ckpt
+  recommended: True
+  width: 512
+  height: 512
+stable-diffusion-1.4:
+  description: The original Stable Diffusion version 1.4 weight file (4.27 GB)
+  repo_id: CompVis/stable-diffusion-v-1-4-original
+  config: v1-inference.yaml
+  file: sd-v1-4.ckpt
+  recommended: False
+  width: 512
+  height: 512
+waifu-diffusion-1.3:
+  description: Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
+  repo_id: hakurei/waifu-diffusion-v1-3
+  config: v1-inference.yaml
+  file: model-epoch09-float32.ckpt
+  recommended: False
+  width: 512
+  height: 512
+trinart-2.0:
+  description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures (2.13 GB)
+  repo_id: naclbit/trinart_stable_diffusion_v2
+  config: v1-inference.yaml
+  file: trinart2_step95000.ckpt
+  recommended: False
+  width: 512
+  height: 512
+trinart_characters-1.0:
+  description: An SD model finetuned with 19.2M anime/manga style images (2.13 GB)
+  repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
+  config: v1-inference.yaml
+  file: trinart_characters_it4_v1.ckpt
+  recommended: False
+  width: 512
+  height: 512
+trinart_vae:
+  description: Custom autoencoder for trinart_characters
+  repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
+  config: VAE/trinart
+  file: autoencoder_fix_kl-f8-trinart_characters.ckpt
+  recommended: False
+  width: 512
+  height: 512
+papercut-1.0:
+  description: SD 1.5 fine-tuned for papercut art (use "PaperCut" in your prompts) (2.13 GB)
+  repo_id: Fictiverse/Stable_Diffusion_PaperCut_Model
+  config: v1-inference.yaml
+  file: PaperCut_v1.ckpt
+  recommended: False
+  width: 512
+  height: 512
+voxel_art-1.0:
+  description: Stable Diffusion trained on voxel art (use "VoxelArt" in your prompts) (4.27 GB)
+  repo_id: Fictiverse/Stable_Diffusion_VoxelArt_Model
+  config: v1-inference.yaml
+  file: VoxelArt_v1.ckpt
+  recommended: False
+  width: 512
+  height: 512
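Each entry in the new INITIAL_MODELS.yaml maps a short model name to its Hugging Face repo_id, checkpoint file, matching inference config, and a recommended flag. A minimal sketch of reading that catalog with PyYAML and listing the recommended defaults; the loading code is illustrative only, not the installer's actual implementation:

    import yaml  # PyYAML, assumed available; it is not one of the files in this diff

    with open("configs/INITIAL_MODELS.yaml") as fh:
        catalog = yaml.safe_load(fh)

    # Entries flagged recommended would form a sensible default download set.
    for name, spec in catalog.items():
        if spec.get("recommended"):
            print(f"{name}: {spec['repo_id']} -> {spec['file']}")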
@@ -7,8 +7,8 @@
 # was trained on.
 stable-diffusion-1.5:
   description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
-  weights: ./models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
-  config: ./configs/stable-diffusion/v1-inference.yaml
+  weights: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
+  config: configs/stable-diffusion/v1-inference.yaml
   width: 512
   height: 512
   vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
@@ -25,3 +25,5 @@ inpainting-1.5:
   config: configs/stable-diffusion/v1-inpainting-inference.yaml
   vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
   description: RunwayML SD 1.5 model optimized for inpainting
+  width: 512
+  height: 512
configs/sd-concepts.txt (new file, 803 lines)
@@ -0,0 +1,803 @@
One Hugging Face concept repository id per line, all under the sd-concepts-library organization and listed alphabetically, beginning:
+sd-concepts-library/001glitch-core
+sd-concepts-library/2814-roth
+sd-concepts-library/3d-female-cyborgs
+sd-concepts-library/4tnght
+sd-concepts-library/80s-anime-ai
+sd-concepts-library/80s-anime-ai-being
+sd-concepts-library/852style-girl
+sd-concepts-library/8bit
+sd-concepts-library/8sconception
+sd-concepts-library/Aflac-duck
+sd-concepts-library/Akitsuki
+sd-concepts-library/Atako
+sd-concepts-library/Exodus-Styling
+sd-concepts-library/RINGAO
+sd-concepts-library/a-female-hero-from-the-legend-of-mir
+sd-concepts-library/a-hat-kid
+sd-concepts-library/a-tale-of-two-empires
+sd-concepts-library/aadhav-face
+sd-concepts-library/aavegotchi
+sd-concepts-library/abby-face
[the list continues one repository id per line; the portion visible in this view runs through sd-concepts-library/orientalist-art, where the page is cut off]
|
||||||
|
sd-concepts-library/osaka-jyo
|
||||||
|
sd-concepts-library/osaka-jyo2
|
||||||
|
sd-concepts-library/osrsmini2
|
||||||
|
sd-concepts-library/osrstiny
|
||||||
|
sd-concepts-library/other-mother
|
||||||
|
sd-concepts-library/ouroboros
|
||||||
|
sd-concepts-library/outfit-items
|
||||||
|
sd-concepts-library/overprettified
|
||||||
|
sd-concepts-library/owl-house
|
||||||
|
sd-concepts-library/painted-by-silver-of-999
|
||||||
|
sd-concepts-library/painted-by-silver-of-999-2
|
||||||
|
sd-concepts-library/painted-student
|
||||||
|
sd-concepts-library/painting
|
||||||
|
sd-concepts-library/pantone-milk
|
||||||
|
sd-concepts-library/paolo-bonolis
|
||||||
|
sd-concepts-library/party-girl
|
||||||
|
sd-concepts-library/pascalsibertin
|
||||||
|
sd-concepts-library/pastelartstyle
|
||||||
|
sd-concepts-library/paul-noir
|
||||||
|
sd-concepts-library/pen-ink-portraits-bennorthen
|
||||||
|
sd-concepts-library/phan
|
||||||
|
sd-concepts-library/phan-s-collage
|
||||||
|
sd-concepts-library/phc
|
||||||
|
sd-concepts-library/phoenix-01
|
||||||
|
sd-concepts-library/pineda-david
|
||||||
|
sd-concepts-library/pink-beast-pastelae-style
|
||||||
|
sd-concepts-library/pintu
|
||||||
|
sd-concepts-library/pion-by-august-semionov
|
||||||
|
sd-concepts-library/piotr-jablonski
|
||||||
|
sd-concepts-library/pixel-mania
|
||||||
|
sd-concepts-library/pixel-toy
|
||||||
|
sd-concepts-library/pjablonski-style
|
||||||
|
sd-concepts-library/plant-style
|
||||||
|
sd-concepts-library/plen-ki-mun
|
||||||
|
sd-concepts-library/pokemon-conquest-sprites
|
||||||
|
sd-concepts-library/pool-test
|
||||||
|
sd-concepts-library/poolrooms
|
||||||
|
sd-concepts-library/poring-ragnarok-online
|
||||||
|
sd-concepts-library/poutine-dish
|
||||||
|
sd-concepts-library/princess-knight-art
|
||||||
|
sd-concepts-library/progress-chip
|
||||||
|
sd-concepts-library/puerquis-toy
|
||||||
|
sd-concepts-library/purplefishli
|
||||||
|
sd-concepts-library/pyramidheadcosplay
|
||||||
|
sd-concepts-library/qpt-atrium
|
||||||
|
sd-concepts-library/quiesel
|
||||||
|
sd-concepts-library/r-crumb-style
|
||||||
|
sd-concepts-library/rahkshi-bionicle
|
||||||
|
sd-concepts-library/raichu
|
||||||
|
sd-concepts-library/rail-scene
|
||||||
|
sd-concepts-library/rail-scene-style
|
||||||
|
sd-concepts-library/ralph-mcquarrie
|
||||||
|
sd-concepts-library/ransom
|
||||||
|
sd-concepts-library/rayne-weynolds
|
||||||
|
sd-concepts-library/rcrumb-portraits-style
|
||||||
|
sd-concepts-library/rd-chaos
|
||||||
|
sd-concepts-library/rd-paintings
|
||||||
|
sd-concepts-library/red-glasses
|
||||||
|
sd-concepts-library/reeducation-camp
|
||||||
|
sd-concepts-library/reksio-dog
|
||||||
|
sd-concepts-library/rektguy
|
||||||
|
sd-concepts-library/remert
|
||||||
|
sd-concepts-library/renalla
|
||||||
|
sd-concepts-library/repeat
|
||||||
|
sd-concepts-library/retro-girl
|
||||||
|
sd-concepts-library/retro-mecha-rangers
|
||||||
|
sd-concepts-library/retropixelart-pinguin
|
||||||
|
sd-concepts-library/rex-deno
|
||||||
|
sd-concepts-library/rhizomuse-machine-bionic-sculpture
|
||||||
|
sd-concepts-library/ricar
|
||||||
|
sd-concepts-library/rickyart
|
||||||
|
sd-concepts-library/rico-face
|
||||||
|
sd-concepts-library/riker-doll
|
||||||
|
sd-concepts-library/rikiart
|
||||||
|
sd-concepts-library/rikiboy-art
|
||||||
|
sd-concepts-library/rilakkuma
|
||||||
|
sd-concepts-library/rishusei-style
|
||||||
|
sd-concepts-library/rj-palmer
|
||||||
|
sd-concepts-library/rl-pkmn-test
|
||||||
|
sd-concepts-library/road-to-ruin
|
||||||
|
sd-concepts-library/robertnava
|
||||||
|
sd-concepts-library/roblox-avatar
|
||||||
|
sd-concepts-library/roy-lichtenstein
|
||||||
|
sd-concepts-library/ruan-jia
|
||||||
|
sd-concepts-library/russian
|
||||||
|
sd-concepts-library/s1m-naoto-ohshima
|
||||||
|
sd-concepts-library/saheeli-rai
|
||||||
|
sd-concepts-library/sakimi-style
|
||||||
|
sd-concepts-library/salmonid
|
||||||
|
sd-concepts-library/sam-yang
|
||||||
|
sd-concepts-library/sanguo-guanyu
|
||||||
|
sd-concepts-library/sas-style
|
||||||
|
sd-concepts-library/scarlet-witch
|
||||||
|
sd-concepts-library/schloss-mosigkau
|
||||||
|
sd-concepts-library/scrap-style
|
||||||
|
sd-concepts-library/scratch-project
|
||||||
|
sd-concepts-library/sculptural-style
|
||||||
|
sd-concepts-library/sd-concepts-library-uma-meme
|
||||||
|
sd-concepts-library/seamless-ground
|
||||||
|
sd-concepts-library/selezneva-alisa
|
||||||
|
sd-concepts-library/sem-mac2n
|
||||||
|
sd-concepts-library/senneca
|
||||||
|
sd-concepts-library/seraphimmoonshadow-art
|
||||||
|
sd-concepts-library/sewerslvt
|
||||||
|
sd-concepts-library/she-hulk-law-art
|
||||||
|
sd-concepts-library/she-mask
|
||||||
|
sd-concepts-library/sherhook-painting
|
||||||
|
sd-concepts-library/sherhook-painting-v2
|
||||||
|
sd-concepts-library/shev-linocut
|
||||||
|
sd-concepts-library/shigure-ui-style
|
||||||
|
sd-concepts-library/shiny-polyman
|
||||||
|
sd-concepts-library/shrunken-head
|
||||||
|
sd-concepts-library/shu-doll
|
||||||
|
sd-concepts-library/shvoren-style
|
||||||
|
sd-concepts-library/sims-2-portrait
|
||||||
|
sd-concepts-library/singsing
|
||||||
|
sd-concepts-library/singsing-doll
|
||||||
|
sd-concepts-library/sintez-ico
|
||||||
|
sd-concepts-library/skyfalls
|
||||||
|
sd-concepts-library/slm
|
||||||
|
sd-concepts-library/smarties
|
||||||
|
sd-concepts-library/smiling-friend-style
|
||||||
|
sd-concepts-library/smooth-pencils
|
||||||
|
sd-concepts-library/smurf-style
|
||||||
|
sd-concepts-library/smw-map
|
||||||
|
sd-concepts-library/society-finch
|
||||||
|
sd-concepts-library/sorami-style
|
||||||
|
sd-concepts-library/spider-gwen
|
||||||
|
sd-concepts-library/spritual-monsters
|
||||||
|
sd-concepts-library/stable-diffusion-conceptualizer
|
||||||
|
sd-concepts-library/star-tours-posters
|
||||||
|
sd-concepts-library/stardew-valley-pixel-art
|
||||||
|
sd-concepts-library/starhavenmachinegods
|
||||||
|
sd-concepts-library/sterling-archer
|
||||||
|
sd-concepts-library/stretch-re1-robot
|
||||||
|
sd-concepts-library/stuffed-penguin-toy
|
||||||
|
sd-concepts-library/style-of-marc-allante
|
||||||
|
sd-concepts-library/summie-style
|
||||||
|
sd-concepts-library/sunfish
|
||||||
|
sd-concepts-library/super-nintendo-cartridge
|
||||||
|
sd-concepts-library/supitcha-mask
|
||||||
|
sd-concepts-library/sushi-pixel
|
||||||
|
sd-concepts-library/swamp-choe-2
|
||||||
|
sd-concepts-library/t-skrang
|
||||||
|
sd-concepts-library/takuji-kawano
|
||||||
|
sd-concepts-library/tamiyo
|
||||||
|
sd-concepts-library/tangles
|
||||||
|
sd-concepts-library/tb303
|
||||||
|
sd-concepts-library/tcirle
|
||||||
|
sd-concepts-library/teelip-ir-landscape
|
||||||
|
sd-concepts-library/teferi
|
||||||
|
sd-concepts-library/tela-lenca
|
||||||
|
sd-concepts-library/tela-lenca2
|
||||||
|
sd-concepts-library/terraria-style
|
||||||
|
sd-concepts-library/tesla-bot
|
||||||
|
sd-concepts-library/test
|
||||||
|
sd-concepts-library/test-epson
|
||||||
|
sd-concepts-library/test2
|
||||||
|
sd-concepts-library/testing
|
||||||
|
sd-concepts-library/thalasin
|
||||||
|
sd-concepts-library/thegeneral
|
||||||
|
sd-concepts-library/thorneworks
|
||||||
|
sd-concepts-library/threestooges
|
||||||
|
sd-concepts-library/thunderdome-cover
|
||||||
|
sd-concepts-library/thunderdome-covers
|
||||||
|
sd-concepts-library/ti-junglepunk-v0
|
||||||
|
sd-concepts-library/tili-concept
|
||||||
|
sd-concepts-library/titan-robot
|
||||||
|
sd-concepts-library/tnj
|
||||||
|
sd-concepts-library/toho-pixel
|
||||||
|
sd-concepts-library/tomcat
|
||||||
|
sd-concepts-library/tonal1
|
||||||
|
sd-concepts-library/tony-diterlizzi-s-planescape-art
|
||||||
|
sd-concepts-library/towerplace
|
||||||
|
sd-concepts-library/toy
|
||||||
|
sd-concepts-library/toy-bonnie-plush
|
||||||
|
sd-concepts-library/toyota-sera
|
||||||
|
sd-concepts-library/transmutation-circles
|
||||||
|
sd-concepts-library/trash-polka-artstyle
|
||||||
|
sd-concepts-library/travis-bedel
|
||||||
|
sd-concepts-library/trigger-studio
|
||||||
|
sd-concepts-library/trust-support
|
||||||
|
sd-concepts-library/trypophobia
|
||||||
|
sd-concepts-library/ttte
|
||||||
|
sd-concepts-library/tubby
|
||||||
|
sd-concepts-library/tubby-cats
|
||||||
|
sd-concepts-library/tudisco
|
||||||
|
sd-concepts-library/turtlepics
|
||||||
|
sd-concepts-library/type
|
||||||
|
sd-concepts-library/ugly-sonic
|
||||||
|
sd-concepts-library/uliana-kudinova
|
||||||
|
sd-concepts-library/uma
|
||||||
|
sd-concepts-library/uma-clean-object
|
||||||
|
sd-concepts-library/uma-meme
|
||||||
|
sd-concepts-library/uma-meme-style
|
||||||
|
sd-concepts-library/uma-style-classic
|
||||||
|
sd-concepts-library/unfinished-building
|
||||||
|
sd-concepts-library/urivoldemort
|
||||||
|
sd-concepts-library/uzumaki
|
||||||
|
sd-concepts-library/valorantstyle
|
||||||
|
sd-concepts-library/vb-mox
|
||||||
|
sd-concepts-library/vcr-classique
|
||||||
|
sd-concepts-library/venice
|
||||||
|
sd-concepts-library/vespertine
|
||||||
|
sd-concepts-library/victor-narm
|
||||||
|
sd-concepts-library/vietstoneking
|
||||||
|
sd-concepts-library/vivien-reid
|
||||||
|
sd-concepts-library/vkuoo1
|
||||||
|
sd-concepts-library/vraska
|
||||||
|
sd-concepts-library/w3u
|
||||||
|
sd-concepts-library/walter-wick-photography
|
||||||
|
sd-concepts-library/warhammer-40k-drawing-style
|
||||||
|
sd-concepts-library/waterfallshadow
|
||||||
|
sd-concepts-library/wayne-reynolds-character
|
||||||
|
sd-concepts-library/wedding
|
||||||
|
sd-concepts-library/wedding-HandPainted
|
||||||
|
sd-concepts-library/werebloops
|
||||||
|
sd-concepts-library/wheatland
|
||||||
|
sd-concepts-library/wheatland-arknight
|
||||||
|
sd-concepts-library/wheelchair
|
||||||
|
sd-concepts-library/wildkat
|
||||||
|
sd-concepts-library/willy-hd
|
||||||
|
sd-concepts-library/wire-angels
|
||||||
|
sd-concepts-library/wish-artist-stile
|
||||||
|
sd-concepts-library/wlop-style
|
||||||
|
sd-concepts-library/wojak
|
||||||
|
sd-concepts-library/wojaks-now
|
||||||
|
sd-concepts-library/wojaks-now-now-now
|
||||||
|
sd-concepts-library/xatu
|
||||||
|
sd-concepts-library/xatu2
|
||||||
|
sd-concepts-library/xbh
|
||||||
|
sd-concepts-library/xi
|
||||||
|
sd-concepts-library/xidiversity
|
||||||
|
sd-concepts-library/xioboma
|
||||||
|
sd-concepts-library/xuna
|
||||||
|
sd-concepts-library/xyz
|
||||||
|
sd-concepts-library/yb-anime
|
||||||
|
sd-concepts-library/yerba-mate
|
||||||
|
sd-concepts-library/yesdelete
|
||||||
|
sd-concepts-library/yf21
|
||||||
|
sd-concepts-library/yilanov2
|
||||||
|
sd-concepts-library/yinit
|
||||||
|
sd-concepts-library/yoji-shinkawa-style
|
||||||
|
sd-concepts-library/yolandi-visser
|
||||||
|
sd-concepts-library/yoshi
|
||||||
|
sd-concepts-library/youpi2
|
||||||
|
sd-concepts-library/youtooz-candy
|
||||||
|
sd-concepts-library/yuji-himukai-style
|
||||||
|
sd-concepts-library/zaney
|
||||||
|
sd-concepts-library/zaneypixelz
|
||||||
|
sd-concepts-library/zdenek-art
|
||||||
|
sd-concepts-library/zero
|
||||||
|
sd-concepts-library/zero-bottle
|
||||||
|
sd-concepts-library/zero-suit-samus
|
||||||
|
sd-concepts-library/zillertal-can
|
||||||
|
sd-concepts-library/zizigooloo
|
||||||
|
sd-concepts-library/zk
|
||||||
|
sd-concepts-library/zoroark
|
@@ -30,7 +30,7 @@ model:
       target: ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
-        initializer_words: ['face', 'man', 'photo', 'africanmale']
+        initializer_words: ['sculpture']
         per_image_tokens: false
         num_vectors_per_token: 1
         progressive_words: False

@@ -30,9 +30,9 @@ model:
       target: ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
-        initializer_words: ['face', 'man', 'photo', 'africanmale']
+        initializer_words: ['sculpture']
         per_image_tokens: false
-        num_vectors_per_token: 1
+        num_vectors_per_token: 8
         progressive_words: False
 
     unet_config:

@@ -22,7 +22,7 @@ model:
       target: ldm.modules.embedding_manager.EmbeddingManager
       params:
         placeholder_strings: ["*"]
-        initializer_words: ['face', 'man', 'photo', 'africanmale']
+        initializer_words: ['sculpture']
         per_image_tokens: false
         num_vectors_per_token: 6
         progressive_words: False

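The three hunks above touch the textual-inversion fine-tune configs: `initializer_words` seeds the new embedding and `num_vectors_per_token` sets how many embedding vectors the `*` placeholder expands to. As a rough illustration of how such a config is consumed, a hedged sketch follows; the script name and flags mirror the upstream textual-inversion convention and are assumptions, not part of this diff:

```bash
# Hypothetical textual-inversion training run against one of the v1-finetune
# configs edited above; adjust paths and flags to your checkout.
python3 main.py \
  --base configs/stable-diffusion/v1-finetune.yaml \
  -t \
  --actual_resume models/ldm/stable-diffusion-v1/model.ckpt \
  -n my_concept \
  --gpus 0, \
  --data_root /path/to/concept/images
```
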
@ -1,84 +1,65 @@
|
|||||||
FROM ubuntu AS get_miniconda
|
FROM python:3.10-slim AS builder
|
||||||
|
|
||||||
SHELL ["/bin/bash", "-c"]
|
|
||||||
|
|
||||||
# install wget
|
|
||||||
RUN apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
wget \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# download and install miniconda
|
|
||||||
ARG conda_version=py39_4.12.0-Linux-x86_64
|
|
||||||
ARG conda_prefix=/opt/conda
|
|
||||||
RUN wget --progress=dot:giga -O /miniconda.sh \
|
|
||||||
https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
|
|
||||||
&& bash /miniconda.sh -b -p ${conda_prefix} \
|
|
||||||
&& rm -f /miniconda.sh
|
|
||||||
|
|
||||||
FROM ubuntu AS invokeai
|
|
||||||
|
|
||||||
# use bash
|
# use bash
|
||||||
SHELL [ "/bin/bash", "-c" ]
|
SHELL [ "/bin/bash", "-c" ]
|
||||||
|
|
||||||
# clean bashrc
|
|
||||||
RUN echo "" > ~/.bashrc
|
|
||||||
|
|
||||||
# Install necessary packages
|
# Install necessary packages
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get install -y \
|
&& apt-get install -y \
|
||||||
--no-install-recommends \
|
--no-install-recommends \
|
||||||
gcc \
|
gcc=4:10.2.* \
|
||||||
git \
|
libgl1-mesa-glx=20.3.* \
|
||||||
libgl1-mesa-glx \
|
libglib2.0-0=2.66.* \
|
||||||
libglib2.0-0 \
|
python3-dev=3.9.* \
|
||||||
pip \
|
|
||||||
python3 \
|
|
||||||
python3-dev \
|
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# clone repository, create models.yaml and create symlinks
|
# set WORKDIR, PATH and copy sources
|
||||||
ARG invokeai_git=invoke-ai/InvokeAI
|
ARG APPDIR=/usr/src/app
|
||||||
ARG invokeai_branch=main
|
WORKDIR ${APPDIR}
|
||||||
ARG project_name=invokeai
|
ENV PATH ${APPDIR}/.venv/bin:$PATH
|
||||||
ARG conda_env_file=environment-lin-cuda.yml
|
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
|
||||||
RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
|
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./
|
||||||
&& cp \
|
|
||||||
"/${project_name}/configs/models.yaml.example" \
|
# install requirements
|
||||||
"/${project_name}/configs/models.yaml" \
|
RUN python3 -m venv .venv \
|
||||||
|
&& pip install \
|
||||||
|
--upgrade \
|
||||||
|
--no-cache-dir \
|
||||||
|
'wheel>=0.38.4' \
|
||||||
|
&& pip install \
|
||||||
|
--no-cache-dir \
|
||||||
|
-r ${PIP_REQUIREMENTS}
|
||||||
|
|
||||||
|
FROM python:3.10-slim AS runtime
|
||||||
|
|
||||||
|
# setup environment
|
||||||
|
ARG APPDIR=/usr/src/app
|
||||||
|
WORKDIR ${APPDIR}
|
||||||
|
COPY --from=builder ${APPDIR} .
|
||||||
|
ENV \
|
||||||
|
PATH=${APPDIR}/.venv/bin:$PATH \
|
||||||
|
INVOKEAI_ROOT=/data \
|
||||||
|
INVOKE_MODEL_RECONFIGURE=--yes
|
||||||
|
|
||||||
|
# Install necessary packages
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
build-essential=12.9 \
|
||||||
|
libgl1-mesa-glx=20.3.* \
|
||||||
|
libglib2.0-0=2.66.* \
|
||||||
|
libopencv-dev=4.5.* \
|
||||||
&& ln -sf \
|
&& ln -sf \
|
||||||
"/${project_name}/environments-and-requirements/${conda_env_file}" \
|
/usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
|
||||||
"/${project_name}/environment.yml" \
|
/usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
|
||||||
&& ln -sf \
|
&& python3 -c "from patchmatch import patch_match" \
|
||||||
/data/models/v1-5-pruned-emaonly.ckpt \
|
&& apt-get remove -y \
|
||||||
"/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
|
--autoremove \
|
||||||
&& ln -sf \
|
build-essential \
|
||||||
/data/outputs/ \
|
&& apt-get autoclean \
|
||||||
"/${project_name}/outputs"
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# set workdir
|
# set Entrypoint and default CMD
|
||||||
WORKDIR "/${project_name}"
|
ENTRYPOINT [ "python3", "scripts/invoke.py" ]
|
||||||
|
CMD [ "--web", "--host=0.0.0.0" ]
|
||||||
# install conda env and preload models
|
|
||||||
ARG conda_prefix=/opt/conda
|
|
||||||
COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
|
|
||||||
RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
|
|
||||||
&& conda init bash \
|
|
||||||
&& source ~/.bashrc \
|
|
||||||
&& conda env create \
|
|
||||||
--name "${project_name}" \
|
|
||||||
&& rm -Rf ~/.cache \
|
|
||||||
&& conda clean -afy \
|
|
||||||
&& echo "conda activate ${project_name}" >> ~/.bashrc
|
|
||||||
|
|
||||||
RUN source ~/.bashrc \
|
|
||||||
&& python scripts/preload_models.py \
|
|
||||||
--no-interactive
|
|
||||||
|
|
||||||
# Copy entrypoint and set env
|
|
||||||
ENV CONDA_PREFIX="${conda_prefix}"
|
|
||||||
ENV PROJECT_NAME="${project_name}"
|
|
||||||
COPY docker-build/entrypoint.sh /
|
|
||||||
ENTRYPOINT [ "/entrypoint.sh" ]
|
|
||||||
|
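The reworked Dockerfile above is a two-stage pip/venv build (the conda/miniconda stage is gone): the requirements file is selected at build time via the `PIP_REQUIREMENTS` build-arg, and the runtime stage expects its data under `/data` (`INVOKEAI_ROOT`). A hedged sketch of using it directly, outside of `build.sh`; the image tag and volume name are illustrative:

```bash
# Build from the repository root, picking a requirements variant:
DOCKER_BUILDKIT=1 docker build \
  --build-arg PIP_REQUIREMENTS=requirements-lin-cuda.txt \
  --file docker-build/Dockerfile \
  --tag invokeai:pip-venv \
  .

# Run it with a named volume mounted at INVOKEAI_ROOT (/data) and the web UI published:
docker run --rm -it \
  --mount source=invokeai_data,target=/data \
  --publish 9090:9090 \
  invokeai:pip-venv
```
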
docker-build/Dockerfile.cloud (new file, 86 lines)
@ -0,0 +1,86 @@
|
|||||||
|
#######################
|
||||||
|
#### Builder stage ####
|
||||||
|
|
||||||
|
FROM library/ubuntu:22.04 AS builder
|
||||||
|
|
||||||
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
|
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||||
|
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
|
apt update && apt-get install -y \
|
||||||
|
git \
|
||||||
|
libglib2.0-0 \
|
||||||
|
libgl1-mesa-glx \
|
||||||
|
python3-venv \
|
||||||
|
python3-pip \
|
||||||
|
build-essential \
|
||||||
|
python3-opencv \
|
||||||
|
libopencv-dev
|
||||||
|
|
||||||
|
# This is needed for patchmatch support
|
||||||
|
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
|
||||||
|
ln -sf opencv4.pc opencv.pc
|
||||||
|
|
||||||
|
ARG WORKDIR=/invokeai
|
||||||
|
WORKDIR ${WORKDIR}
|
||||||
|
|
||||||
|
ENV VIRTUAL_ENV=${WORKDIR}/.venv
|
||||||
|
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||||
|
|
||||||
|
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||||
|
python3 -m venv ${VIRTUAL_ENV} &&\
|
||||||
|
pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
|
||||||
|
torch==1.12.0+cu116 \
|
||||||
|
torchvision==0.13.0+cu116 &&\
|
||||||
|
pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
|
||||||
|
|
||||||
|
COPY . .
|
||||||
|
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||||
|
cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
|
||||||
|
pip install -r requirements.txt &&\
|
||||||
|
pip install -e .
|
||||||
|
|
||||||
|
|
||||||
|
#######################
|
||||||
|
#### Runtime stage ####
|
||||||
|
|
||||||
|
FROM library/ubuntu:22.04 as runtime
|
||||||
|
|
||||||
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
|
ENV PYTHONUNBUFFERED=1
|
||||||
|
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
|
apt update && apt install -y --no-install-recommends \
|
||||||
|
git \
|
||||||
|
curl \
|
||||||
|
ncdu \
|
||||||
|
iotop \
|
||||||
|
bzip2 \
|
||||||
|
libglib2.0-0 \
|
||||||
|
libgl1-mesa-glx \
|
||||||
|
python3-venv \
|
||||||
|
python3-pip \
|
||||||
|
build-essential \
|
||||||
|
python3-opencv \
|
||||||
|
libopencv-dev &&\
|
||||||
|
apt-get clean && apt-get autoclean
|
||||||
|
|
||||||
|
ARG WORKDIR=/invokeai
|
||||||
|
WORKDIR ${WORKDIR}
|
||||||
|
|
||||||
|
ENV INVOKEAI_ROOT=/mnt/invokeai
|
||||||
|
ENV VIRTUAL_ENV=${WORKDIR}/.venv
|
||||||
|
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||||
|
|
||||||
|
COPY --from=builder ${WORKDIR} ${WORKDIR}
|
||||||
|
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig
|
||||||
|
|
||||||
|
# build patchmatch
|
||||||
|
RUN python -c "from patchmatch import patch_match"
|
||||||
|
|
||||||
|
## workaround for non-existent initfile when runtime directory is mounted; see #1613
|
||||||
|
RUN touch /root/.invokeai
|
||||||
|
|
||||||
|
ENTRYPOINT ["bash"]
|
||||||
|
|
||||||
|
CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]
|
docker-build/Makefile (new file, 44 lines)
@ -0,0 +1,44 @@
|
|||||||
|
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
|
||||||
|
INVOKEAI_ROOT=/mnt/invokeai
|
||||||
|
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
|
||||||
|
HOST_MOUNT_PATH=${HOME}/invokeai
|
||||||
|
|
||||||
|
IMAGE=local/invokeai:latest
|
||||||
|
|
||||||
|
USER=$(shell id -u)
|
||||||
|
GROUP=$(shell id -g)
|
||||||
|
|
||||||
|
# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
|
||||||
|
# This is consistent with the expected non-Docker behaviour.
|
||||||
|
# Contents can be moved to a persistent storage and used to prime the cache on another host.
|
||||||
|
|
||||||
|
build:
|
||||||
|
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
|
||||||
|
|
||||||
|
configure:
|
||||||
|
docker run --rm -it --runtime=nvidia --gpus=all \
|
||||||
|
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
||||||
|
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
||||||
|
${IMAGE} -c "python scripts/configure_invokeai.py"
|
||||||
|
|
||||||
|
# Run the container with the runtime dir mounted and the web server exposed on port 9090
|
||||||
|
web:
|
||||||
|
docker run --rm -it --runtime=nvidia --gpus=all \
|
||||||
|
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
||||||
|
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
||||||
|
-p 9090:9090 \
|
||||||
|
${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
|
||||||
|
|
||||||
|
# Run the cli with the runtime dir mounted
|
||||||
|
cli:
|
||||||
|
docker run --rm -it --runtime=nvidia --gpus=all \
|
||||||
|
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
||||||
|
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
||||||
|
${IMAGE} -c "python scripts/invoke.py"
|
||||||
|
|
||||||
|
# Run the container with the runtime dir mounted and open a bash shell
|
||||||
|
shell:
|
||||||
|
docker run --rm -it --runtime=nvidia --gpus=all \
|
||||||
|
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --
|
||||||
|
|
||||||
|
.PHONY: build configure web cli shell
|
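The Makefile above wraps the cloud image lifecycle. A typical sequence, run from `docker-build/` on a host with the NVIDIA container runtime (the override path in the last line is illustrative):

```bash
make build                                   # builds local/invokeai:latest from Dockerfile.cloud
make configure                               # downloads/configures models into the mounted runtime dir
make web HOST_MOUNT_PATH=/srv/invokeai       # serves the web UI on port 9090 from a custom host path
```
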
@ -1,84 +1,35 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
set -e
|
set -e
|
||||||
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoint!!!
|
|
||||||
# configure values by using env when executing build.sh
|
|
||||||
# f.e. env ARCH=aarch64 GITHUB_INVOKE_AI=https://github.com/yourname/yourfork.git ./build.sh
|
|
||||||
|
|
||||||
source ./docker-build/env.sh || echo "please run from repository root" || exit 1
|
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
|
||||||
|
|
||||||
invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
|
source ./docker-build/env.sh \
|
||||||
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
|
|| echo "please execute docker-build/build.sh from repository root" \
|
||||||
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
|
|| exit 1
|
||||||
invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
|
|
||||||
invokeai_branch=${INVOKEAI_BRANCH:-main}
|
PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
|
||||||
huggingface_token=${HUGGINGFACE_TOKEN?}
|
DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
|
||||||
|
|
||||||
# print the settings
|
# print the settings
|
||||||
echo "You are using these values:"
|
echo -e "You are using these values:\n"
|
||||||
echo -e "project_name:\t\t ${project_name}"
|
echo -e "Dockerfile:\t ${DOCKERFILE}"
|
||||||
echo -e "volumename:\t\t ${volumename}"
|
echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
|
||||||
echo -e "arch:\t\t\t ${arch}"
|
echo -e "Volumename:\t ${VOLUMENAME}"
|
||||||
echo -e "platform:\t\t ${platform}"
|
echo -e "arch:\t\t ${ARCH}"
|
||||||
echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
|
echo -e "Platform:\t ${PLATFORM}"
|
||||||
echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
|
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
|
||||||
echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
|
|
||||||
echo -e "invokeai_git:\t\t ${invokeai_git}"
|
|
||||||
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
|
|
||||||
|
|
||||||
_runAlpine() {
|
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
|
||||||
docker run \
|
echo -e "Volume already exists\n"
|
||||||
--rm \
|
|
||||||
--interactive \
|
|
||||||
--tty \
|
|
||||||
--mount source="$volumename",target=/data \
|
|
||||||
--workdir /data \
|
|
||||||
alpine "$@"
|
|
||||||
}
|
|
||||||
|
|
||||||
_copyCheckpoints() {
|
|
||||||
echo "creating subfolders for models and outputs"
|
|
||||||
_runAlpine mkdir models
|
|
||||||
_runAlpine mkdir outputs
|
|
||||||
echo "downloading v1-5-pruned-emaonly.ckpt"
|
|
||||||
_runAlpine wget \
|
|
||||||
--header="Authorization: Bearer ${huggingface_token}" \
|
|
||||||
-O models/v1-5-pruned-emaonly.ckpt \
|
|
||||||
https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
|
|
||||||
echo "done"
|
|
||||||
}
|
|
||||||
|
|
||||||
_checkVolumeContent() {
|
|
||||||
_runAlpine ls -lhA /data/models
|
|
||||||
}
|
|
||||||
|
|
||||||
_getModelMd5s() {
|
|
||||||
_runAlpine \
|
|
||||||
alpine sh -c "md5sum /data/models/*.ckpt"
|
|
||||||
}
|
|
||||||
|
|
||||||
if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
|
|
||||||
echo "Volume already exists"
|
|
||||||
if [[ -z "$(_checkVolumeContent)" ]]; then
|
|
||||||
echo "looks empty, copying checkpoint"
|
|
||||||
_copyCheckpoints
|
|
||||||
fi
|
|
||||||
echo "Models in ${volumename}:"
|
|
||||||
_checkVolumeContent
|
|
||||||
else
|
else
|
||||||
echo -n "createing docker volume "
|
echo -n "createing docker volume "
|
||||||
docker volume create "${volumename}"
|
docker volume create "${VOLUMENAME}"
|
||||||
_copyCheckpoints
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Build Container
|
# Build Container
|
||||||
docker build \
|
docker build \
|
||||||
--platform="${platform}" \
|
--platform="${PLATFORM}" \
|
||||||
--tag "${invokeai_tag}" \
|
--tag="${INVOKEAI_TAG}" \
|
||||||
--build-arg project_name="${project_name}" \
|
--build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
|
||||||
--build-arg conda_version="${invokeai_conda_version}" \
|
--file="${DOCKERFILE}" \
|
||||||
--build-arg conda_prefix="${invokeai_conda_prefix}" \
|
|
||||||
--build-arg conda_env_file="${invokeai_conda_env_file}" \
|
|
||||||
--build-arg invokeai_git="${invokeai_git}" \
|
|
||||||
--build-arg invokeai_branch="${invokeai_branch}" \
|
|
||||||
--file ./docker-build/Dockerfile \
|
|
||||||
.
|
.
|
||||||
|
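With the rewritten build script, configuration moves from conda-specific variables to a pip requirements file and an optional alternate Dockerfile. A hedged usage sketch, run from the repository root; the AMD requirements file name is an assumption about the variants shipped in `environments-and-requirements/`:

```bash
env \
  PIP_REQUIREMENTS=requirements-lin-amd.txt \
  INVOKE_DOCKERFILE=docker-build/Dockerfile \
  ./docker-build/build.sh
```
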
@@ -1,8 +0,0 @@
-#!/bin/bash
-set -e
-
-source "${CONDA_PREFIX}/etc/profile.d/conda.sh"
-conda activate "${PROJECT_NAME}"
-
-python scripts/invoke.py \
-    ${@:---web --host=0.0.0.0}

@@ -1,13 +1,10 @@
 #!/usr/bin/env bash
 
-project_name=${PROJECT_NAME:-invokeai}
-volumename=${VOLUMENAME:-${project_name}_data}
-arch=${ARCH:-x86_64}
-platform=${PLATFORM:-Linux/${arch}}
-invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}
-
-export project_name
-export volumename
-export arch
-export platform
-export invokeai_tag
+# Variables shared by build.sh and run.sh
+REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
+VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
+ARCH=${ARCH:-$(uname -m)}
+PLATFORM=${PLATFORM:-Linux/${ARCH}}
+CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
+INVOKEAI_BRANCH=$(git branch --show)
+INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH/\//-}}

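Since `env.sh` now derives the volume name, platform and image tag from the checkout itself, the values it will hand to `build.sh` and `run.sh` can be inspected without building anything; the tag in the comment is only what a default `main` checkout with the CUDA flavor would typically produce:

```bash
source ./docker-build/env.sh
echo "VOLUMENAME=${VOLUMENAME}"
echo "PLATFORM=${PLATFORM}"
echo "INVOKEAI_TAG=${INVOKEAI_TAG}"   # e.g. invokeai-cuda:main on a default checkout
```
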
@@ -1,15 +1,31 @@
 #!/usr/bin/env bash
 set -e
 
-source ./docker-build/env.sh || echo "please run from repository root" || exit 1
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
+# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
+
+source ./docker-build/env.sh \
+  || echo "please run from repository root" \
+  || exit 1
+
+# check if HUGGINGFACE_TOKEN is available
+# You must have accepted the terms of use for required models
+HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}
+
+echo -e "You are using these values:\n"
+echo -e "Volumename:\t ${VOLUMENAME}"
+echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
 
 docker run \
   --interactive \
   --tty \
   --rm \
-  --platform "$platform" \
-  --name "$project_name" \
-  --hostname "$project_name" \
-  --mount source="$volumename",target=/data \
-  --publish 9090:9090 \
-  "$invokeai_tag" ${1:+$@}
+  --platform="$PLATFORM" \
+  --name="${REPOSITORY_NAME,,}" \
+  --hostname="${REPOSITORY_NAME,,}" \
+  --mount="source=$VOLUMENAME,target=/data" \
+  --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
+  --publish=9090:9090 \
+  --cap-add=sys_nice \
+  ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
+  "$INVOKEAI_TAG" ${1:+$@}

@ -4,180 +4,275 @@ title: Changelog
|
|||||||
|
|
||||||
# :octicons-log-16: **Changelog**
|
# :octicons-log-16: **Changelog**
|
||||||
|
|
||||||
|
## v2.2.4 <small>(11 December 2022)</small>
|
||||||
|
|
||||||
|
**the `invokeai` directory**
|
||||||
|
|
||||||
|
Previously there were two directories to worry about, the directory that
|
||||||
|
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
|
||||||
|
directory that contained the models files, embeddings, configuration and
|
||||||
|
outputs. With the 2.2.4 release, this dual system is done away with, and
|
||||||
|
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
|
||||||
|
live in a directory named `invokeai`. By default this directory is located in
|
||||||
|
your home directory (e.g. `\Users\yourname` on Windows), but you can select
|
||||||
|
where it goes at install time.
|
||||||
|
|
||||||
|
After installation, you can delete the install directory (the one that the zip
|
||||||
|
file creates when it unpacks). Do **not** delete or move the `invokeai`
|
||||||
|
directory!
|
||||||
|
|
||||||
|
**Initialization file `invokeai/invokeai.init`**
|
||||||
|
|
||||||
|
You can place frequently-used startup options in this file, such as the default
|
||||||
|
number of steps or your preferred sampler. To keep everything in one place, this
|
||||||
|
file has now been moved into the `invokeai` directory and is named
|
||||||
|
`invokeai.init`.
|
||||||
|
|
||||||
|
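Because the options in `invokeai.init` are ordinary `invoke.py` switches, the same settings can be tried on a single run first; a hedged example (option names follow the 2.2-era CLI and are illustrative):

```bash
# One-off run with explicit options; copying the same switches into
# invokeai/invokeai.init makes them the startup defaults.
python scripts/invoke.py --web --host=0.0.0.0 --steps=30 --sampler_name=k_euler_a
```
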
**To update from Version 2.2.3**
|
||||||
|
|
||||||
|
The easiest route is to download and unpack one of the 2.2.4 installer files.
|
||||||
|
When it asks you for the location of the `invokeai` runtime directory, respond
|
||||||
|
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
|
||||||
|
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
|
||||||
|
and answer "Y" when asked if you want to reuse the directory.
|
||||||
|
|
||||||
|
The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
|
||||||
|
does not know about the new directory layout and won't be fully functional.
|
||||||
|
|
||||||
|
**To update to 2.2.5 (and beyond) there's now an update path**
|
||||||
|
|
||||||
|
As they become available, you can update to more recent versions of InvokeAI
|
||||||
|
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
|
||||||
|
Running it without any arguments will install the most recent version of
|
||||||
|
InvokeAI. Alternatively, you can install a specific release by running the `update.sh`
|
||||||
|
script with an argument in the command shell. This syntax accepts the path to
|
||||||
|
the desired release's zip file, which you can find by clicking on the green
|
||||||
|
"Code" button on this repository's home page.
|
||||||
|
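As a concrete illustration of the update path just described (the zip URL is a placeholder; copy the real one for the release you want):

```bash
# Run from inside the invokeai runtime directory.
# Update in place to the newest release:
./update.sh

# Or pass the zip of a specific release:
./update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.5.zip
```
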
|
||||||
|
**Other 2.2.4 Improvements**
|
||||||
|
|
||||||
|
- Fix InvokeAI GUI initialization by @addianto in #1687
|
||||||
|
- fix link in documentation by @lstein in #1728
|
||||||
|
- Fix broken link by @ShawnZhong in #1736
|
||||||
|
- Remove reference to binary installer by @lstein in #1731
|
||||||
|
- documentation fixes for 2.2.3 by @lstein in #1740
|
||||||
|
- Modify installer links to point closer to the source installer by @ebr in
|
||||||
|
#1745
|
||||||
|
- add documentation warning about 1650/60 cards by @lstein in #1753
|
||||||
|
- Fix Linux source URL in installation docs by @andybearman in #1756
|
||||||
|
- Make install instructions discoverable in readme by @damian0815 in #1752
|
||||||
|
- typo fix by @ofirkris in #1755
|
||||||
|
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
|
||||||
|
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
|
||||||
|
in #1765
|
||||||
|
- stability and usage improvements to binary & source installers by @lstein in
|
||||||
|
#1760
|
||||||
|
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
|
||||||
|
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
|
||||||
|
- invoke script cds to its location before running by @lstein in #1805
|
||||||
|
- Make PaperCut and VoxelArt models load again by @lstein in #1730
|
||||||
|
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
|
||||||
|
#1817
|
||||||
|
- Clean up readme by @hipsterusername in #1820
|
||||||
|
- Optimized Docker build with support for external working directory by @ebr in
|
||||||
|
#1544
|
||||||
|
- disable pushing the cloud container by @mauwii in #1831
|
||||||
|
- Fix docker push github action and expand with additional metadata by @ebr in
|
||||||
|
#1837
|
||||||
|
- Fix Broken Link To Notebook by @VedantMadane in #1821
|
||||||
|
- Account for flat models by @spezialspezial in #1766
|
||||||
|
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
|
||||||
|
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
|
||||||
|
@SammCheese in #1848
|
||||||
|
- Make force free GPU memory work in img2img by @addianto in #1844
|
||||||
|
- New installer by @lstein
|
||||||
|
|
||||||
|
## v2.2.3 <small>(2 December 2022)</small>
|
||||||
|
|
||||||
|
!!! Note
|
||||||
|
|
||||||
|
This point release removes references to the binary installer from the
|
||||||
|
installation guide. The binary installer is not stable at the current
|
||||||
|
time. First time users are encouraged to use the "source" installer as
|
||||||
|
described in [Installing InvokeAI with the Source Installer](installation/INSTALL_SOURCE.md)
|
||||||
|
|
||||||
|
With InvokeAI 2.2, this project now provides enthusiasts and professionals a
|
||||||
|
robust workflow solution for creating AI-generated and human facilitated
|
||||||
|
compositions. Additional enhancements have been made as well, improving safety,
|
||||||
|
ease of use, and installation.
|
||||||
|
|
||||||
|
Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
|
||||||
|
512x768 image (and less for smaller images), and is compatible with
|
||||||
|
Windows/Linux/Mac (M1 & M2).
|
||||||
|
|
||||||
|
You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
|
||||||
|
introduces the main WebUI enhancement for version 2.2 -
|
||||||
|
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
|
||||||
|
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
|
||||||
|
potential for users to create and iterate on their creations. The following
|
||||||
|
sections describe what's new for InvokeAI.
|
||||||
|
|
||||||
|
## v2.2.2 <small>(30 November 2022)</small>
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
|
||||||
|
The binary installer is not ready for prime time. First time users are recommended to install via the "source" installer accessible through the links at the bottom of this page.
|
||||||
|
|
||||||
|
With InvokeAI 2.2, this project now provides enthusiasts and professionals a
|
||||||
|
robust workflow solution for creating AI-generated and human facilitated
|
||||||
|
compositions. Additional enhancements have been made as well, improving safety,
|
||||||
|
ease of use, and installation.
|
||||||
|
|
||||||
|
Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
|
||||||
|
512x768 image (and less for smaller images), and is compatible with
|
||||||
|
Windows/Linux/Mac (M1 & M2).
|
||||||
|
|
||||||
|
You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
|
||||||
|
introduces the main WebUI enhancement for version 2.2 -
|
||||||
|
[The Unified Canvas](https://invoke-ai.github.io/InvokeAI/features/UNIFIED_CANVAS/).
|
||||||
|
This new workflow is the biggest enhancement added to the WebUI to date, and
|
||||||
|
unlocks a stunning amount of potential for users to create and iterate on their
|
||||||
|
creations. The following sections describe what's new for InvokeAI.
|
||||||
|
|
||||||
|
## v2.2.0 <small>(2 December 2022)</small>
|
||||||
|
|
||||||
|
With InvokeAI 2.2, this project now provides enthusiasts and professionals a
|
||||||
|
robust workflow solution for creating AI-generated and human facilitated
|
||||||
|
compositions. Additional enhancements have been made as well, improving safety,
|
||||||
|
ease of use, and installation.
|
||||||
|
|
||||||
|
Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
|
||||||
|
512x768 image (and less for smaller images), and is compatible with
|
||||||
|
Windows/Linux/Mac (M1 & M2).
|
||||||
|
|
||||||
|
You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
|
||||||
|
introduces the main WebUI enhancement for version 2.2 -
|
||||||
|
[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
|
||||||
|
biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
|
||||||
|
potential for users to create and iterate on their creations. The following
|
||||||
|
sections describe what's new for InvokeAI.
|
||||||
|
|
||||||
|
## v2.1.3 <small>(13 November 2022)</small>
|
||||||
|
|
||||||
|
- A choice of installer scripts that automate installation and configuration.
|
||||||
|
See
|
||||||
|
[Installation](installation/index.md).
|
||||||
|
- A streamlined manual installation process that works for both Conda and
|
||||||
|
PIP-only installs. See
|
||||||
|
[Manual Installation](installation/INSTALL_MANUAL.md).
|
||||||
|
- The ability to save frequently-used startup options (model to load, steps,
|
||||||
|
sampler, etc) in a `.invokeai` file. See
|
||||||
|
[Client](features/CLI.md)
|
||||||
|
- Support for AMD GPU cards (non-CUDA) on Linux machines.
|
||||||
|
- Multiple bugs and edge cases squashed.
|
||||||
|
|
||||||
## v2.1.0 <small>(2 November 2022)</small>
|
## v2.1.0 <small>(2 November 2022)</small>
|
||||||
|
|
||||||
- update mac instructions to use invokeai for env name by @willwillems in
|
- update mac instructions to use invokeai for env name by @willwillems in #1030
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1030
|
- Update .gitignore by @blessedcoolant in #1040
|
||||||
- Update .gitignore by @blessedcoolant in
|
- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1040
|
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
|
||||||
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
|
- Print out the device type which is used by @manzke in #1073
|
||||||
missing after merge by @skurovec in
|
- Hires Addition by @hipsterusername in #1063
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1056
|
|
||||||
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1060
|
|
||||||
- Print out the device type which is used by @manzke in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1073
|
|
||||||
- Hires Addition by @hipsterusername in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1063
|
|
||||||
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
||||||
@skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
|
@skurovec in #1081
|
||||||
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
||||||
warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
|
warning by @db3000 in #1077
|
||||||
- fix noisy images at high step counts by @lstein in
|
- fix noisy images at high step counts by @lstein in #1086
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1086
|
- Generalize facetool strength argument by @db3000 in #1078
|
||||||
- Generalize facetool strength argument by @db3000 in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1078
|
|
||||||
- Enable fast switching among models at the invoke> command line by @lstein in
|
- Enable fast switching among models at the invoke> command line by @lstein in
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1066
|
#1066
|
||||||
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
|
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1095
|
- Update generate.py by @unreleased in #1109
|
||||||
- Update generate.py by @unreleased in
|
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in #1125
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1109
|
- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
|
||||||
- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in
|
- Fix broken doc links, fix malaprop in the project subtitle by @majick in #1131
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1125
|
- Only output facetool parameters if enhancing faces by @db3000 in #1119
|
||||||
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1123
|
|
||||||
- Fix broken doc links, fix malaprop in the project subtitle by @majick in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1131
|
|
||||||
- Only output facetool parameters if enhancing faces by @db3000 in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1119
|
|
||||||
- Update gitignore to ignore codeformer weights at new location by
|
- Update gitignore to ignore codeformer weights at new location by
|
||||||
@spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1136
|
@spezialspezial in #1136
|
||||||
- fix links to point to invoke-ai.github.io #1117 by @mauwii in
|
- fix links to point to invoke-ai.github.io #1117 by @mauwii in #1143
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1143
|
- Rework-mkdocs by @mauwii in #1144
|
||||||
- Rework-mkdocs by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1144
|
|
||||||
- add option to CLI and pngwriter that allows user to set PNG compression level
|
- add option to CLI and pngwriter that allows user to set PNG compression level
|
||||||
by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
|
by @lstein in #1127
|
||||||
- Fix img2img DDIM index out of bound by @wfng92 in
|
- Fix img2img DDIM index out of bound by @wfng92 in #1137
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1137
|
- Fix gh actions by @mauwii in #1128
|
||||||
- Fix gh actions by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1128
|
- update mac instructions to use invokeai for env name by @willwillems in #1030
|
||||||
- update mac instructions to use invokeai for env name by @willwillems in
|
- Update .gitignore by @blessedcoolant in #1040
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1030
|
- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
|
||||||
- Update .gitignore by @blessedcoolant in
|
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1040
|
- Print out the device type which is used by @manzke in #1073
|
||||||
- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
|
- Hires Addition by @hipsterusername in #1063
|
||||||
missing after merge by @skurovec in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1056
|
|
||||||
- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1060
|
|
||||||
- Print out the device type which is used by @manzke in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1073
|
|
||||||
- Hires Addition by @hipsterusername in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1063
|
|
||||||
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
|
||||||
@skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
|
@skurovec in #1081
|
||||||
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
- Forward dream.py to invoke.py using the same interpreter, add deprecation
|
||||||
warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
|
warning by @db3000 in #1077
|
||||||
- fix noisy images at high step counts by @lstein in
|
- fix noisy images at high step counts by @lstein in #1086
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1086
|
- Generalize facetool strength argument by @db3000 in #1078
|
||||||
- Generalize facetool strength argument by @db3000 in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1078
|
|
||||||
- Enable fast switching among models at the invoke> command line by @lstein in
|
- Enable fast switching among models at the invoke> command line by @lstein in
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1066
|
#1066
|
||||||
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
|
- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1095
|
- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
|
||||||
- Fixed documentation typos and resolved merge conflicts by @rupeshs in
|
- Only output facetool parameters if enhancing faces by @db3000 in #1119
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1123
|
|
||||||
- Only output facetool parameters if enhancing faces by @db3000 in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1119
|
|
||||||
- add option to CLI and pngwriter that allows user to set PNG compression level
|
- add option to CLI and pngwriter that allows user to set PNG compression level
|
||||||
by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
|
by @lstein in #1127
|
||||||
- Fix img2img DDIM index out of bound by @wfng92 in
|
- Fix img2img DDIM index out of bound by @wfng92 in #1137
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1137
|
- Add text prompt to inpaint mask support by @lstein in #1133
|
||||||
- Add text prompt to inpaint mask support by @lstein in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1133
|
|
||||||
- Respect http[s] protocol when making socket.io middleware by @damian0815 in
|
- Respect http[s] protocol when making socket.io middleware by @damian0815 in
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/976
|
#976
|
||||||
- WebUI: Adds Codeformer support by @psychedelicious in
|
- WebUI: Adds Codeformer support by @psychedelicious in #1151
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1151
|
- Skips normalizing prompts for web UI metadata by @psychedelicious in #1165
|
||||||
- Skips normalizing prompts for web UI metadata by @psychedelicious in
|
- Add Asymmetric Tiling by @carson-katri in #1132
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1165
|
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in #1172
|
||||||
- Add Asymmetric Tiling by @carson-katri in
|
|
||||||
https://github.com/invoke-ai/InvokeAI/pull/1132
|
|
||||||
- Web UI: Increases max CFG Scale to 200 by @psychedelicious in
|
|
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious in #1175
- Flips channels using array slicing instead of using OpenCV by @psychedelicious in #1178
- Fix typo in docs: s/Formally/Formerly by @noodlebox in #1176
- fix clipseg loading problems by @lstein in #1177
- Correct color channels in upscale using array slicing by @wfng92 in #1181
- Web UI: Filters existing images when adding new images; Fixes #1085 by @psychedelicious in #1171
- fix a number of bugs in textual inversion by @lstein in #1190
- Improve !fetch, add !replay command by @ArDiouscuros in #882
- Fix generation of image with s>1000 by @holstvoogd in #951
- Web UI: Gallery improvements by @psychedelicious in #1198
- Update CLI.md by @krummrey in #1211
- outcropping improvements by @lstein in #1207
- add support for loading VAE autoencoders by @lstein in #1216
- remove duplicate fix_func for MPS by @wfng92 in #1210
- Metadata storage and retrieval fixes by @lstein in #1204
- nix: add shell.nix file by @Cloudef in #1170
- Web UI: Changes vite dist asset paths to relative by @psychedelicious in #1185
- Web UI: Removes isDisabled from PromptInput by @psychedelicious in #1187
- Allow user to generate images with initial noise as on M1 / mps system by @ArDiouscuros in #981
- feat: adding filename format template by @plucked in #968
- Web UI: Fixes broken bundle by @psychedelicious in #1242
- Support runwayML custom inpainting model by @lstein in #1243
- Update IMG2IMG.md by @talitore in #1262
- New dockerfile - including a build- and a run- script as well as a GH-Action by @mauwii in #1233
- cut over from karras to model noise schedule for higher steps by @lstein in #1222
- Prompt tweaks by @lstein in #1268
- Outpainting implementation by @Kyle0654 in #1251
- fixing aspect ratio on hires by @tjennings in #1249
- Fix-build-container-action by @mauwii in #1274
- handle all unicode characters by @damian0815 in #1276
- adds models.user.yml to .gitignore by @JakeHL in #1281
- remove debug branch, set fail-fast to false by @mauwii in #1284
- Protect-secrets-on-pr by @mauwii in #1285
- Web UI: Adds initial inpainting implementation by @psychedelicious in #1225
- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in #1289
- Use proper authentication to download model by @mauwii in #1287
- Prevent indexing error for mode RGB by @spezialspezial in #1294
- Integrate sd-v1-5 model into test matrix (easily expandable), remove unnecessary caches by @mauwii in #1293
- add --no-interactive to configure_invokeai step by @mauwii in #1302
- 1-click installer and updater. Uses micromamba to install git and conda into a contained environment (if necessary) before running the normal installation script by @cmdr2 in #1253
- configure_invokeai.py script downloads the weight files by @lstein in #1290

## v2.0.1 <small>(13 October 2022)</small>

New binary files:

- docs/assets/canvas/biker_granny.png (359 KiB)
- docs/assets/canvas/biker_jacket_granny.png (528 KiB)
- docs/assets/canvas/mask_granny.png (601 KiB)
- docs/assets/canvas/staging_area.png (59 KiB)
- docs/assets/concepts/image1.png (122 KiB)
- docs/assets/concepts/image2.png (128 KiB)
- docs/assets/concepts/image3.png (99 KiB)
- docs/assets/concepts/image4.png (112 KiB)
- docs/assets/concepts/image5.png (107 KiB)
- docs/assets/invoke_ai_banner.png (169 KiB)
@@ -1,5 +1,5 @@
---
title: Command-Line Interface
---

# :material-bash: CLI

@@ -130,20 +130,34 @@ file should contain the startup options as you would type them on the
command line (`--steps=10 --grid`), one argument per line, or a
mixture of both using any of the accepted command switch formats:

!!! example "my unmodified initialization file"

    ```bash title="~/.invokeai" linenums="1"
    # InvokeAI initialization file
    # This is the InvokeAI initialization file, which contains command-line default values.
    # Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
    # or renaming it and then running configure_invokeai.py again.

    # The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
    --root="/Users/mauwii/invokeai"

    # the --outdir option controls the default location of image files.
    --outdir="/Users/mauwii/invokeai/outputs"

    # You may place other frequently-used startup commands here, one or more per line.
    # Examples:
    # --web --host=0.0.0.0
    # --steps=20
    # -Ak_euler_a -C10.0
    ```

!!! note

    The initialization file only accepts the command line arguments.
    There are additional arguments that you can provide on the `invoke>` command
    line (such as `-n` or `--iterations`) that cannot be entered into this file.
    Also be alert for empty blank lines at the end of the file, which will cause
    an arguments error at startup time.

## List of prompt arguments

@@ -195,15 +209,17 @@ Here are the invoke> command that apply to txt2img:
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |

!!! note

    The width and height of the image must be multiples of 64. You can
    provide different values, but they will be rounded down to the nearest multiple
    of 64.

!!! example "This is an example of img2img"

    ```bash
    invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
    ```

This will modify the indicated vacation photograph by making it more like the
prompt. Results will vary greatly depending on what is in the image. We also ask

@@ -253,7 +269,7 @@ description of the part of the image to replace. For example, if you have an
image of a breakfast plate with a bagel, toast and scrambled eggs, you can
selectively mask the bagel and replace it with a piece of cake this way:

```bash
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
```

@@ -265,20 +281,26 @@ are getting too much or too little masking you can adjust the threshold down (to
get more mask), or up (to get less). In this example, by passing `-tm` a higher
value, we are insisting on a more stringent classification.

```bash
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
```

### Custom Styles and Subjects

You can load and use hundreds of community-contributed Textual
Inversion models just by typing the appropriate trigger phrase. Please
see [Concepts Library](CONCEPTS.md) for more details.

## Other Commands

The CLI offers a number of commands that begin with "!".

### Postprocessing images

To postprocess a file using face restoration or upscaling, use the `!fix`
command.

#### `!fix`

This command runs a post-processor on a previously-generated image. It takes a
PNG filename or path and applies your choice of the `-U`, `-G`, or `--embiggen`
@@ -305,19 +327,19 @@ Some examples:
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
```

#### `!mask`

This command takes an image, a text prompt, and uses the `clipseg` algorithm to
automatically generate a mask of the area that matches the text prompt. It is
useful for debugging the text masking process prior to inpainting with the
`--text_mask` argument. See [INPAINTING.md] for details.

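As a sketch of how this might look in practice (the `-tm` term and threshold are illustrative, mirroring the inpainting syntax shown earlier; the exact arguments may differ in your version):

```bash
# illustrative only -- preview the mask that clipseg would generate for the "bagel" region
invoke> !mask /path/to/breakfast.png -tm bagel 0.5
```
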
### Model selection and importation

The CLI allows you to add new models on the fly, as well as to switch among them
rapidly without leaving the script.

#### `!models`

This prints out a list of the models defined in `config/models.yaml`. The active
model is bold-faced
@@ -330,7 +352,7 @@ laion400m not loaded <no description>
waifu-diffusion not loaded Waifu Diffusion v1.3
</pre>

#### `!switch <model>`

This quickly switches from one model to another without leaving the CLI script.
`invoke.py` uses a memory caching system; once a model has been loaded,

@@ -375,7 +397,7 @@ laion400m not loaded <no description>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>

#### `!import_model <path/to/model/weights>`

This command imports a new model weights file into InvokeAI, makes it available
for image generation within the script, and writes out the configuration for the

@@ -425,7 +447,7 @@ OK to import [n]? <b>y</b>
invoke>
</pre>

#### `!edit_model <name_of_model>`

The `!edit_model` command can be used to modify a model that is already defined
in `config/models.yaml`. Call it with the short name of the model you wish to

@@ -462,12 +484,12 @@ text... Outputs: [2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix
"outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512
-H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25 ```

### History processing

The CLI provides a series of convenient commands for reviewing previous actions,
retrieving them, modifying them, and re-running them.

#### `!history`

The invoke script keeps track of all the commands you issue during a session,
allowing you to re-run them. On Mac and Linux systems, it also writes the
@@ -479,20 +501,22 @@ during the session (Windows), or the most recent 1000 commands (Mac|Linux). You
can then repeat a command by using the command `!NNN`, where "NNN" is the
history line number. For example:

!!! example ""

    ```bash
    invoke> !history
    ...
    [14] happy woman sitting under tree wearing broad hat and flowing garment
    [15] beautiful woman sitting under tree wearing broad hat and flowing garment
    [18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
    [20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
    [21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
    ...
    invoke> !20
    invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
    ```

#### `!fetch`

This command retrieves the generation parameters from a previously generated
image and either loads them into the command line (Linux|Mac), or prints them

@@ -502,33 +526,36 @@ a folder with image png files, and wildcard \*.png to retrieve the dream command
used to generate the images, and save them to a file commands.txt for further
processing.

!!! example "load the generation command for a single png file"

    ```bash
    invoke> !fetch 0000015.8929913.png
    # the script returns the next line, ready for editing and running:
    invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
    ```

!!! example "fetch the generation commands from a batch of files and store them into `selected.txt`"

    ```bash
    invoke> !fetch outputs\selected-imgs\*.png selected.txt
    ```

#### `!replay`

This command replays a text file generated by !fetch or created manually

!!! example

    ```bash
    invoke> !replay outputs\selected-imgs\selected.txt
    ```

!!! note

    These commands may behave unexpectedly if given a PNG file that was
    not generated by InvokeAI.

#### `!search <search string>`

This is similar to !history but it only returns lines that contain
`search string`. For example:

@@ -538,7 +565,7 @@ invoke> !search surreal
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
```

#### `!clear`

This clears the search history from memory and disk. Be advised that this
operation is irreversible and does not issue any warnings!
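Like the other history commands it is issued at the `invoke>` prompt; a minimal (hypothetical) invocation is simply:

```bash
# wipes the entire command history -- there is no undo
invoke> !clear
```
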
docs/features/CONCEPTS.md (new file, 131 lines)
@@ -0,0 +1,131 @@
---
title: Concepts Library
---

# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files

## Using Textual Inversion Files

Textual inversion (TI) files are small models that customize the output of
Stable Diffusion image generation. They can augment SD with specialized subjects
and artistic styles. They are also known as "embeds" in the machine learning
world.

Each TI file introduces one or more vocabulary terms to the SD model. These are
known in InvokeAI as "triggers." Triggers are often, but not always, denoted
using angle brackets as in "<trigger-phrase>". The two most common types of
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
different TI training packages. InvokeAI supports both formats, but its
[built-in TI training system](TEXTUAL_INVERSION.md) produces `.pt`.

The [Hugging Face company](https://huggingface.co/sd-concepts-library) has
amassed a large library of >800 community-contributed TI files covering a
broad range of subjects and styles. InvokeAI has built-in support for this
library which downloads and merges TI files automatically upon request. You can
also install your own or others' TI files by placing them in a designated
directory.

### An Example

Here are a few examples to illustrate how it works. All these images were
generated using the command-line client and the Stable Diffusion 1.5 model:

| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
| ![japanese-gardener](../assets/concepts/image1.png) | ![japanese-gardener-ghibli-face](../assets/concepts/image2.png) | ![japanese-gardener-hoi4-leaders](../assets/concepts/image3.png) | ![japanese-gardener-cartoona-animals](../assets/concepts/image4.png) |

You can also combine styles and concepts:

<figure markdown>
| A portrait of <alf> in <cartoona-animal> style |
| :--------------------------------------------------------: |
| ![portrait-of-alf](../assets/concepts/image5.png) |
</figure>
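At the `invoke>` prompt, a trigger phrase such as those above is simply typed as part of the prompt; for instance (the generation flags here are illustrative):

```bash
# generate with one of the community concepts shown above
invoke> Japanese gardener <ghibli-face> -s 50 -W 512 -H 512
```
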
## Using a Hugging Face Concept

!!! warning "Authenticating to HuggingFace"

    Some concepts require valid authentication to HuggingFace. Without it, they will not be downloaded
    and will be silently ignored.

    If you used an installer to install InvokeAI, you may have already set a HuggingFace token.
    If you skipped this step, you can:

    - run the InvokeAI configuration script again (if you used a manual installer): `scripts/configure_invokeai.py`
    - set one of the `HUGGINGFACE_TOKEN` or `HUGGING_FACE_HUB_TOKEN` environment variables to contain your token

    Finally, if you already used any HuggingFace library on your computer, you might already have a token
    in your local cache. Check for a hidden `.huggingface` directory in your home folder. If it
    contains a `token` file, then you are all set.

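For example, on Linux or macOS the token can be supplied through the environment before launching InvokeAI (the token value shown is a placeholder):

```bash
# supply a HuggingFace token for the current session
export HUGGINGFACE_TOKEN="hf_xxxxxxxxxxxxxxxxx"
python scripts/invoke.py --web
```
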
Hugging Face TI concepts are downloaded and installed automatically as you
require them. This requires your machine to be connected to the Internet. To
find out what each concept is for, you can browse the
[Hugging Face concepts library](https://huggingface.co/sd-concepts-library) and
look at examples of what each concept produces.

When you have an idea of a concept you wish to try, go to the command-line
client (CLI) and type a `<` character and the beginning of the Hugging Face
concept name you wish to load. Press ++tab++, and the CLI will show you all
matching concepts. You can also type `<` and hit ++tab++ to get a listing of all
~800 concepts, but be prepared to scroll up to see them all! If there is more
than one match you can continue to type and ++tab++ until the concept is
completed.

!!! example

    If you type in `<x` and hit ++tab++, you'll be prompted with the completions:

    ```py
    <xatu2> <xatu> <xbh> <xi> <xidiversity> <xioboma> <xuna> <xyz>
    ```

    Now type `id` and press ++tab++. It will be autocompleted to `<xidiversity>`
    because this is a unique match.

Finish your prompt and generate as usual. You may include multiple concept terms
in the prompt.

If you have never used this concept before, you will see a message that the TI
model is being downloaded and installed. After this, the concept will be saved
locally (in the `models/sd-concepts-library` directory) for future use.

Several steps happen during downloading and installation, including a scan of
the file for malicious code. Should any errors occur, you will be warned and the
concept will fail to load. Generation will then continue treating the trigger
term as a normal string of characters (e.g. as literal `<ghibli-face>`).

You can also use `<concept-names>` in the WebGUI's prompt textbox. There is no
autocompletion at this time.

## Installing your Own TI Files

You may install any number of `.pt` and `.bin` files simply by copying them into
the `embeddings` directory of the InvokeAI runtime directory (usually `invokeai`
in your home directory). You may create subdirectories in order to organize the
files in any way you wish. Be careful not to overwrite one file with another.
For example, TI files generated by the Hugging Face toolkit share the name
`learned_embedding.bin`. You can use subdirectories to keep them distinct.

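A hypothetical layout, assuming your runtime directory is `~/invokeai` and the downloaded files live in `~/Downloads` (both paths are illustrative):

```bash
# keep each HuggingFace-toolkit embedding in its own subdirectory so the
# identically-named learned_embedding.bin files do not overwrite each other
mkdir -p ~/invokeai/embeddings/hoi4-leaders ~/invokeai/embeddings/princess-knight
cp ~/Downloads/hoi4-leaders/learned_embedding.bin ~/invokeai/embeddings/hoi4-leaders/
cp ~/Downloads/princess-knight/learned_embedding.bin ~/invokeai/embeddings/princess-knight/
```
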
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
files it finds there. At startup you will see a message similar to this one:

```bash
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
```

Note the `*` trigger term. This is a placeholder term that many early TI
tutorials taught people to use rather than a more descriptive term.
Unfortunately, if you have multiple TI files that all use this term, only the
first one loaded will be triggered by use of the term.

To avoid this problem, you can use the `merge_embeddings.py` script to merge two
or more TI files together. If it encounters a collision of terms, the script
will prompt you to select new terms that do not collide. See
[Textual Inversion](TEXTUAL_INVERSION.md) for details.

## Further Reading

Please see [the repository](https://github.com/rinongal/textual_inversion) and
associated paper for details and limitations.
|
@ -85,7 +85,7 @@ increasing size, every tile after the first in a row or column
|
|||||||
effectively only covers an extra `1 - overlap_ratio` on each axis. If
|
effectively only covers an extra `1 - overlap_ratio` on each axis. If
|
||||||
the input/`--init_img` is same size as a tile, the ideal (for time)
|
the input/`--init_img` is same size as a tile, the ideal (for time)
|
||||||
scaling factors with the default overlap (0.25) are 1.75, 2.5, 3.25,
|
scaling factors with the default overlap (0.25) are 1.75, 2.5, 3.25,
|
||||||
4.0 etc..
|
4.0, etc.
|
||||||
|
|
||||||
`-embiggen_tiles <spaced list of tiles>`
|
`-embiggen_tiles <spaced list of tiles>`
|
||||||
|
|
||||||
@ -100,6 +100,15 @@ Tiles are numbered starting with one, and left-to-right,
|
|||||||
top-to-bottom. So, if you are generating a 3x3 tiled image, the
|
top-to-bottom. So, if you are generating a 3x3 tiled image, the
|
||||||
middle row would be `4 5 6`.
|
middle row would be `4 5 6`.
|
||||||
|
|
||||||
|
`-embiggen_strength <strength>`
|
||||||
|
|
||||||
|
Another advanced option if you want to experiment with the strength parameter
|
||||||
|
that embiggen uses when it calls Img2Img. Values range from 0.0 to 1.0
|
||||||
|
and lower values preserve more of the character of the initial image.
|
||||||
|
Values that are too high will result in a completely different end image,
|
||||||
|
while values that are too low will result in an image not dissimilar to one
|
||||||
|
you would get with ESRGAN upscaling alone. The default value is 0.4.
|
||||||
|
|
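As a sketch only, assuming `-embiggen_strength` can be passed on the same `!fix` line as the `--embiggen` arguments used elsewhere in this page (filename and values are illustrative):

```bash
# a 3x embiggen that keeps more of the original's character by lowering the strength
invoke> !fix ./outputs/img-samples/000001.3357757885.png --embiggen 3.0 0.75 0.25 -embiggen_strength 0.3
```
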
### Examples

!!! example ""
@@ -12,21 +12,19 @@ stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:

!!! example ""

    ```commandline
    tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
    ```

<figure markdown>

| original image | generated image |
| :------------: | :-------------: |
| ![original-image](../assets/tree-and-river-sketch.png){ width=320 } | ![generated-image](../assets/tree-and-river.png){ width=320 } |

</figure>

The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
(`-f`) controls how much the original will be modified, ranging from `0.0` (keep

@@ -88,13 +86,15 @@ from a prompt. If the step count is 10, then the "latent space" (Stable
Diffusion's internal representation of the image) for the prompt "fire" with
seed `1592514025` develops something like this:

!!! example ""

    ```bash
    invoke> "fire" -s10 -W384 -H384 -S1592514025
    ```

    <figure markdown>
    ![latent steps](../assets/img2img/000019.steps.png){ width=720 }
    </figure>

Put simply: starting from a frame of fuzz/static, SD finds details in each frame
that it thinks look like "fire" and brings them a little bit more into focus,

@@ -109,25 +109,23 @@ into the sequence at the appropriate point, with just the right amount of noise.

### A concrete example

!!! example "I want SD to draw a fire based on this hand-drawn image"

    ![drawing](../assets/img2img/fire-drawing.png){ align=left }

Let's only do 10 steps, to make it easier to see what's happening. If strength
is `0.7`, this is what the internal steps the algorithm has to take will look
like:

<figure markdown>
![gravity32](../assets/img2img/000032.steps.gravity.png)
</figure>

With strength `0.4`, the steps look more like this:

<figure markdown>
![gravity30](../assets/img2img/000030.steps.gravity.png)
</figure>

Notice how much more fuzzy the starting image is for strength `0.7` compared to
`0.4`, and notice also how much longer the sequence is with `0.7`:
@@ -158,7 +158,7 @@ when filling in missing regions. It has an almost uncanny ability to blend the
new regions with existing ones in a semantically coherent way.

To install the inpainting model, follow the
[instructions](../installation/050_INSTALLING_MODELS.md) for installing a new model.
You may use either the CLI (`invoke.py` script) or directly edit the
`configs/models.yaml` configuration file to do this. The main thing to watch out
for is that the model `config` option must be set up to use
docs/features/NSFW.md (new file, 89 lines)
@@ -0,0 +1,89 @@
---
title: The NSFW Checker
---

# :material-image-off: NSFW Checker

## The NSFW ("Safety") Checker

The Stable Diffusion image generation models will produce sexual
imagery if deliberately prompted, and will occasionally produce such
images when this is not intended. Such images are colloquially known
as "Not Safe for Work" (NSFW). This behavior is due to the nature of
the training set that Stable Diffusion was trained on, which culled
millions of "aesthetic" images from the Internet.

You may not wish to be exposed to these images, and in some
jurisdictions it may be illegal to publicly distribute such imagery,
including mounting a publicly-available server that provides
unfiltered images to the public. Furthermore, the [Stable Diffusion
weights
License](https://github.com/invoke-ai/InvokeAI/blob/main/LICENSE-ModelWeights.txt)
forbids the model from being used to "exploit any of the
vulnerabilities of a specific group of persons."

For these reasons Stable Diffusion offers a "safety checker," a
machine learning model trained to recognize potentially disturbing
imagery. When a potentially NSFW image is detected, the checker will
blur the image and paste a warning icon on top. The checker can be
turned on and off on the command line using `--nsfw_checker` and
`--no-nsfw_checker`.

At installation time, InvokeAI will ask whether the checker should be
activated by default (neither argument given on the command line). The
response is stored in the InvokeAI initialization file (usually
`.invokeai` in your home directory). You can change the default at any
time by opening this file in a text editor and commenting or
uncommenting the line `--nsfw_checker`.

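For example, to override the stored default for a single session (the script path follows the convention used elsewhere in these docs):

```bash
# run one session with the checker disabled, regardless of the ~/.invokeai default
python scripts/invoke.py --no-nsfw_checker
```
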
## Caveats

There are a number of caveats that you need to be aware of.

### Accuracy

The checker is [not perfect](https://arxiv.org/abs/2210.04610). It will
occasionally flag innocuous images (false positives), and will
frequently miss violent and gory imagery (false negatives). It rarely
fails to flag sexual imagery, but this has been known to happen. For
these reasons, the InvokeAI team prefers to refer to the software as a
"NSFW Checker" rather than "safety checker."

### Memory Usage and Performance

The NSFW checker consumes an additional 1.2G of GPU VRAM on top of the
3.4G of VRAM used by Stable Diffusion v1.5 (this is with
half-precision arithmetic). This means that the checker will not run
successfully on GPU cards with less than 6GB VRAM, and will reduce the
size of the images that you can produce.

The checker also introduces a slight performance penalty. Images will
take ~1 second longer to generate when the checker is
activated. Generally this is not noticeable.

### Intermediate Images in the Web UI

The checker only operates on the final image produced by the Stable
Diffusion algorithm. If you are using the Web UI and have enabled the
display of intermediate images, you will briefly be exposed to a
low-resolution (mosaicized) version of the final image before it is
flagged by the checker and replaced by a fully blurred version. You
are encouraged to turn **off** intermediate image rendering when you
are using the checker. Future versions of InvokeAI will apply
additional blurring to intermediate images when the checker is active.

### Watermarking

InvokeAI does not apply any sort of watermark to images it
generates. However, it does write metadata into the PNG data area,
including the prompt used to generate the image and relevant parameter
settings. These fields can be examined using the `sd-metadata.py`
script that comes with the InvokeAI package.

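For example (the filename below is illustrative):

```bash
# dump the prompt and generation settings embedded in a generated PNG
python scripts/sd-metadata.py outputs/img-samples/000001.4829112.png
```
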
Note that several other Stable Diffusion distributions offer
wavelet-based "invisible" watermarking. We have experimented with the
library used to generate these watermarks and have reached the
conclusion that while the watermarking library may be adding
watermarks to PNG images, the currently available version is unable to
retrieve them successfully. If and when a functioning version of the
library becomes available, we will offer this feature as well.
@@ -133,29 +133,6 @@ outputs = g.txt2img("a unicorn in manhattan")
Outputs is a list of lists in the format [[filename1,seed1],[filename2,seed2],...].

Please see the documentation in ldm/generate.py for more information.

---
@@ -120,7 +120,7 @@ A number of caveats:
   (`--iterations`) argument.

3. Your results will be _much_ better if you use the `inpaint-1.5` model
   released by runwayML and installed by default by `scripts/configure_invokeai.py`.
   This model was trained specifically to harmoniously fill in image gaps. The
   standard model will work as well, but you may notice color discontinuities at
   the border.
@@ -28,21 +28,17 @@ should "just work" without further intervention. Simply pass the `--upscale`
the popup in the Web GUI.

**GFPGAN** requires a series of downloadable model files to work. These are
loaded when you run `scripts/configure_invokeai.py`. If GFPGAN is failing with an
error, please run the following from the InvokeAI directory:

```bash
python scripts/configure_invokeai.py
```

If you do not run this script in advance, the GFPGAN module will attempt to
download the model files the first time you try to perform facial
reconstruction.

### Upscaling

`-U : <upscaling_factor> <upscaling_strength>`

@@ -110,7 +106,7 @@ This repo also allows you to perform face restoration using
[CodeFormer](https://github.com/sczhou/CodeFormer).

In order to set up CodeFormer to work, you need to download the models like with
GFPGAN. You can do this either by running `configure_invokeai.py` or by manually
downloading the
[model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
and saving it to `ldm/invoke/restoration/codeformer/weights` folder.

@@ -119,7 +115,7 @@ You can use `-ft` prompt argument to swap between CodeFormer and the default
GFPGAN. The above mentioned `-G` prompt argument will allow you to control the
strength of the restoration effect.

### CodeFormer Usage

The following command will perform face restoration with CodeFormer instead of
the default gfpgan.

@@ -160,7 +156,7 @@ A new file named `000044.2945021133.fixed.png` will be created in the output
directory. Note that the `!fix` command does not replace the original file,
unlike the behavior at generate time.

## How to disable

If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
you can disable them on the invoke.py command line with the `--no_restore` and
@@ -20,16 +20,55 @@ would type at the invoke> prompt:
Then pass this file's name to `invoke.py` when you invoke it:

```bash
python scripts/invoke.py --from_file "/path/to/prompts.txt"
```

You may also read a series of prompts from standard input by providing
a filename of `-`. For example, here is a python script that creates a
matrix of prompts, each one varying slightly:

```python
#!/usr/bin/env python

adjectives = ['sunny','rainy','overcast']
samplers = ['k_lms','k_euler_a','k_heun']
cfg = [7.5, 9, 11]

for adj in adjectives:
    for samp in samplers:
        for cg in cfg:
            print(f'a {adj} day -A{samp} -C{cg}')
```

Its output looks like this (abbreviated):

```bash
a sunny day -Ak_lms -C7.5
a sunny day -Ak_lms -C9
a sunny day -Ak_lms -C11
a sunny day -Ak_euler_a -C7.5
a sunny day -Ak_euler_a -C9
...
a overcast day -Ak_heun -C9
a overcast day -Ak_heun -C11
```

To feed it to invoke.py, pass the filename of "-":

```bash
python matrix.py | python scripts/invoke.py --from_file -
```

When the script is finished, each of the 27 combinations
of adjective, sampler and CFG will be executed.

The command-line interface provides `!fetch` and `!replay` commands
which allow you to read the prompts from a single previously-generated
image or a whole directory of them, write the prompts to a file, and
then replay them. Or you can create your own file of prompts and feed
them to the command-line client from within an interactive session.
See [Command-Line Interface](CLI.md) for details.

---

## **Negative and Unconditioned Prompts**
@@ -51,7 +90,9 @@ original prompt:
`#!bash "A fantastical translucent pony made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

![step1](../assets/negative_prompt_walkthru/step1.png)

</figure>

That image has a woman, so if we want the horse without a rider, we can

@@ -61,7 +102,9 @@ this:
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

![step2](../assets/negative_prompt_walkthru/step2.png)

</figure>

That's nice - but say we also don't want the image to be quite so blue. We can

@@ -70,7 +113,9 @@ add "blue" to the list of negative prompts, so it's now [woman blue]:
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

![step3](../assets/negative_prompt_walkthru/step3.png)

</figure>

Getting close - but there's no sense in having a saddle when our horse doesn't

@@ -79,7 +124,9 @@ have a rider, so we'll add one more negative prompt: [woman blue saddle].
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<figure markdown>

![step4](../assets/negative_prompt_walkthru/step4.png)

</figure>

!!! notes "Notes about this feature:"
@@ -124,8 +171,12 @@ this prompt of `a man picking apricots from a tree`, let's see what happens if
we increase and decrease how much attention we want Stable Diffusion to pay to
the word `apricots`:

<figure markdown>

![step1](../assets/prompt_syntax/apricots-1.png)

</figure>

Using `-` to reduce apricot-ness:

| `a man picking apricots- from a tree` | `a man picking apricots-- from a tree` | `a man picking apricots--- from a tree` |

@@ -141,8 +192,12 @@ Using `+` to increase apricot-ness:
You can also change the balance between different parts of a prompt. For
example, below is a `mountain man`:

<figure markdown>

![step1](../assets/prompt_syntax/mountain-man.png)

</figure>

And here he is with more mountain:

| `mountain+ man` | `mountain++ man` | `mountain+++ man` |

@@ -259,14 +314,18 @@ usual, unless you fix the seed, the prompts will give you different results each
time you run them.

<figure markdown>

### "blue sphere, red cube, hybrid"

</figure>

This example doesn't use melding at all and represents the default way of mixing
concepts.

<figure markdown>

![blue-sphere-red-cube-hyprid](../assets/prompt-blending/blue-sphere-red-cube-hybrid.png)

</figure>

It's interesting to see how the AI expressed the concept of "cube" as the four
@ -274,6 +333,7 @@ quadrants of the enclosing frame. If you look closely, there is depth there, so
|
|||||||
the enclosing frame is actually a cube.
|
the enclosing frame is actually a cube.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.25 red cube:0.75 hybrid"
|
### "blue sphere:0.25 red cube:0.75 hybrid"
|
||||||
|
|
||||||

|

|
||||||
@ -286,6 +346,7 @@ the AI's "latent space" of semantic representations. Where is Ludwig
|
|||||||
Wittgenstein when you need him?
|
Wittgenstein when you need him?
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.75 red cube:0.25 hybrid"
|
### "blue sphere:0.75 red cube:0.25 hybrid"
|
||||||
|
|
||||||

|

|
||||||
@ -296,6 +357,7 @@ Definitely more blue-spherey. The cube is gone entirely, but it's really cool
|
|||||||
abstract art.
|
abstract art.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.5 red cube:0.5 hybrid"
|
### "blue sphere:0.5 red cube:0.5 hybrid"
|
||||||
|
|
||||||

|

|
||||||
@ -306,6 +368,7 @@ Whoa...! I see blue and red, but no spheres or cubes. Is the word "hybrid"
|
|||||||
summoning up the concept of some sort of scifi creature? Let's find out.
|
summoning up the concept of some sort of scifi creature? Let's find out.
|
||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
### "blue sphere:0.5 red cube:0.5"
|
### "blue sphere:0.5 red cube:0.5"
|
||||||
|
|
||||||

|

|
||||||

docs/features/UNIFIED_CANVAS.md (new file, 284 lines)

---
title: Unified Canvas
---

The Unified Canvas is a tool designed to streamline and simplify the process of
composing an image using Stable Diffusion. It offers artists all of the
available Stable Diffusion generation modes (Text To Image, Image To Image,
Inpainting, and Outpainting) as a single unified workflow. The flexibility of
the tool allows you to tweak and edit image generations, extend images beyond
their initial size, and to create new content in a freeform way both inside and
outside of existing images.

This document explains the basics of using the Unified Canvas, introducing you
to its features and tools one by one. It also describes some of the more
advanced tools available to power users of the Canvas.

## Basics

The Unified Canvas consists of two layers: the **Base Layer** and the **Mask
Layer**. You can swap from one layer to the other by selecting the layer you
want in the drop-down menu on the top left corner of the Unified Canvas, or by
pressing the (Q) hotkey.

### Base Layer

The **Base Layer** is the image content currently managed by the Canvas, and can
be exported at any time to the gallery by using the **Save to Gallery** option.
When the Base Layer is selected, the Brush (B) and Eraser (E) tools will
directly manipulate the base layer. Any images uploaded to the Canvas, or sent
to the Unified Canvas from the gallery, will clear out all existing content and
set the Base layer to the new image.

### Staging Area

When you generate images, they will display in the Canvas's **Staging Area**,
alongside the Staging Area toolbar buttons. While the Staging Area is active,
you cannot interact with the Canvas itself.

<figure markdown>



</figure>

Accepting generations will commit the new generation to the **Base Layer**. You
can review all generated images using the Prev/Next arrows, save any individual
generations to your gallery (without committing to the Base layer) or discard
generations. While you can Undo a discard in an individual Canvas session, any
generations that are not saved will be lost when the Canvas resets.

### Mask Layer

The **Mask Layer** consists of any masked sections that have been created to
inform Inpainting generations. You can paint a new mask, or edit an existing
mask, using the Brush tool and the Eraser with the Mask layer set as your Active
layer. Any masked areas will only affect generation inside of the current
bounding box.

### Bounding Box

When generating a new image, Invoke will process and apply new images within the
area denoted by the **Bounding Box**. The Width & Height settings of the
Bounding Box, as well as its location within the Unified Canvas and pixels or
empty space that it encloses, determine how new invocations are generated - see
[Inpainting & Outpainting](#inpainting-and-outpainting) below. The Bounding Box
can be moved and resized using the Move (V) tool. It can also be resized using
the Bounding Box options in the Options Panel. By using these controls you can
generate larger or smaller images, control which sections of the image are being
processed, as well as control Bounding Box tools like the Bounding Box
fill/erase.

### <a name="inpainting-and-outpainting"></a> Inpainting & Outpainting

"Inpainting" means asking the AI to refine part of an image while leaving the
rest alone. For example, updating a portrait of your grandmother to have her
wear a biker's jacket.

| masked original | inpaint result |
| :---------------------------------------------------------: | :------------------------------------------------------------------------------: |
|  |  |

"Outpainting" means asking the AI to expand the original image beyond its
original borders, making a bigger image that's still based on the original. For
example, extending the above image of your Grandmother in a biker's jacket to
include her wearing jeans (and while we're at it, a motorcycle!)

<figure markdown>



</figure>

When you are using the Unified Canvas, Invoke decides automatically whether to
do Inpainting, Outpainting, ImageToImage, or TextToImage by looking inside the
area enclosed by the Bounding Box. It chooses the appropriate type of generation
based on whether the Bounding Box contains empty (transparent) areas on the Base
layer, or whether it contains colored areas from previous generations (or from
painted brushstrokes) on the Base layer, and/or whether the Mask layer contains
any brushstrokes. See [Generation Methods](#generation-methods) below for more
information.

## Getting Started

To get started with the Unified Canvas, you will want to generate a new base
layer using Txt2Img or importing an initial image. We'll refer to either of
these methods as the "initial image" in the below guide.

From there, you can consider the following techniques to augment your image:

- **New Images**: Move the bounding box to an empty area of the Canvas, type in
  your prompt, and Invoke, to generate a new image using the Text to Image
  function.
- **Image Correction**: Use the color picker and brush tool to paint corrections
  on the image, switch to the Mask layer, and brush a mask over your painted
  area to use **Inpainting**. You can also use the **ImageToImage** generation
  method to invoke new interpretations of the image.
- **Image Expansion**: Move the bounding box to include a portion of your
  initial image, and a portion of transparent/empty pixels, then Invoke using a
  prompt that describes what you'd like to see in that area. This will Outpaint
  the image. You'll typically find more coherent results if you keep about
  50-60% of the original image in the bounding box. Make sure that the Image To
  Image Strength slider is set to a high value - you may need to set it higher
  than you are used to.
- **New Content on Existing Images**: If you want to add new details or objects
  into your image, use the brush tool to paint a sketch of what you'd like to
  see on the image, switch to the Mask layer, and brush a mask over your painted
  area to use **Inpainting**. If the masked area is small, consider using a
  smaller bounding box to take advantage of Invoke's automatic Scaling features,
  which can help to produce better details.
- **And more**: There are a number of creative ways to use the Canvas, and the
  above are just starting points. We're excited to see what you come up with!

## <a name="generation-methods"></a> Generation Methods

The Canvas can use all generation methods available (Txt2Img, Img2Img,
Inpainting, and Outpainting), and these will be automatically selected and used
based on the current selection area within the Bounding Box.

### Text to Image

If the Bounding Box is placed over an area of Canvas with an **empty Base
Layer**, invoking a new image will use **TextToImage**. This generates an
entirely new image based on your prompt.

### Image to Image

If the Bounding Box is placed over an area of Canvas with an **existing Base
Layer area with no transparent pixels or masks**, invoking a new image will use
**ImageToImage**. This uses the image within the bounding box and your prompt to
interpret a new image. The image will be closer to your original image at lower
Image to Image strengths.

### Inpainting

If the Bounding Box is placed over an area of Canvas with an **existing Base
Layer and any pixels selected using the Mask layer**, invoking a new image will
use **Inpainting**. Inpainting uses the existing colors/forms in the masked area
in order to generate a new image for the masked area only. The unmasked portion
of the image will remain the same. Image to Image strength applies to the
inpainted area.

If you desire something completely different from the original image in your new
generation (i.e., if you want Invoke to ignore existing colors/forms), consider
toggling the Inpaint Replace setting on, and use high values for both Inpaint
Replace and Image To Image Strength.

!!! note

    By default, the **Scale Before Processing** option — which
    inpaints more coherent details by generating at a larger resolution and then
    scaling — is only activated when the Bounding Box is relatively small.
    To get the best inpainting results you should therefore resize your Bounding
    Box to the smallest area that contains your mask and enough surrounding detail
    to help Stable Diffusion understand the context of what you want it to draw.
    You should also update your prompt so that it describes _just_ the area within
    the Bounding Box.

### Outpainting

If the Bounding Box is placed over an area of Canvas partially filled by an
existing Base Layer area and partially by transparent pixels or masks, invoking
a new image will use **Outpainting**, as well as **Inpainting** any masked
areas.
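
The four cases above amount to a simple decision rule. Here is a minimal
illustrative sketch of that rule (not InvokeAI's actual code; the function and
argument names are invented for illustration):

```python
def choose_generation_method(has_base_content: bool, has_empty_pixels: bool, has_mask: bool) -> str:
    """Illustrative sketch of the automatic method selection described above."""
    if not has_base_content:
        return "TextToImage"   # bounding box covers only empty canvas
    if has_empty_pixels:
        return "Outpainting"   # partly filled box: extend the image (masked areas are also inpainted)
    if has_mask:
        return "Inpainting"    # filled box with mask strokes: regenerate only the masked pixels
    return "ImageToImage"      # filled box, no mask: reinterpret the whole box


print(choose_generation_method(True, False, True))  # -> Inpainting
```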

---

## Advanced Features

Features with non-obvious behavior are detailed below, in order to provide
clarity on the intent and common use cases we expect for utilizing them.

### Toolbar

#### Mask Options

- **Enable Mask** - This flag can be used to Enable or Disable the currently
  painted mask. If you have painted a mask, but you don't want it to affect the
  next invocation, but you _also_ don't want to delete it, then you can set this
  option to Disable. When you want the mask back, set this back to Enable.
- **Preserve Masked Area** - When enabled, Preserve Masked Area inverts the
  effect of the Mask on the Inpainting process. Pixels in masked areas will be
  kept unchanged, and unmasked areas will be regenerated.

#### Creative Tools

- **Brush - Base/Mask Modes** - The Brush tool switches automatically between
  different modes of operation for the Base and Mask layers respectively.
  - On the Base layer, the brush will directly paint on the Canvas using the
    color selected on the Brush Options menu.
  - On the Mask layer, the brush will create a new mask. If you're finding the
    mask difficult to see over the existing content of the Unified Canvas, you
    can change the color it is drawn with using the color selector on the Mask
    Options dropdown.
- **Erase Bounding Box** - On the Base layer, erases all pixels within the
  Bounding Box.
- **Fill Bounding Box** - On the Base layer, fills all pixels within the
  Bounding Box with the currently selected color.

#### Canvas Tools

- **Move Tool** - Allows for manipulation of the Canvas view (by dragging on the
  Canvas, outside the bounding box), the Bounding Box (by dragging the edges of
  the box), or the Width/Height of the Bounding Box (by dragging one of the 9
  directional handles).
- **Reset View** - Click to re-orient the view to the center of the Bounding
  Box.
- **Merge Visible** - If your browser is having performance problems drawing the
  image in the Unified Canvas, click this to consolidate all of the information
  currently being rendered by your browser into a merged copy of the image. This
  lowers the resource requirements and should improve performance.

### Seam Correction

When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
by Stable Diffusion into your existing image. To do this, the area around the
`seam` at the boundary between your image and the new generation is
automatically blended to produce a seamless output. In a fully automatic
process, a mask is generated to cover the seam, and then the area of the seam is
Inpainted.

Although the default options should work well most of the time, sometimes it can
help to alter the parameters that control the seam Inpainting. A wider seam and
a blur setting of about 1/3 of the seam have been noted as producing
consistently strong results (e.g. 96 wide and 16 blur - adds up to 32 blur with
both sides). Seam strength of 0.7 is best for reducing hard seams.

- **Seam Size** - The size of the seam masked area. Set higher to make a larger
  mask around the seam.
- **Seam Blur** - The size of the blur that is applied on _each_ side of the
  masked area.
- **Seam Strength** - The Image To Image Strength parameter used for the
  Inpainting generation that is applied to the seam area.
- **Seam Steps** - The number of generation steps that should be used to Inpaint
  the seam.

### Infill & Scaling

- **Scale Before Processing & W/H**: When generating images with a bounding box
  smaller than the optimized W/H of the model (e.g., 512x512 for SD1.5), this
  feature first generates at a larger size with the same aspect ratio, and then
  scales that image down to fill the selected area. This is particularly useful
  when inpainting very small details. Scaling is optional but is enabled by
  default.
- **Inpaint Replace**: When Inpainting, the default method is to utilize the
  existing RGB values of the Base layer to inform the generation process. If
  Inpaint Replace is enabled, noise is generated and blended with the existing
  pixels (completely replacing the original RGB values at an Inpaint Replace
  value of 1). This can help generate more variation from the pixels on the Base
  layers. (See the sketch after this list for an illustration of the blend.)
  - When using Inpaint Replace you should use a higher Image To Image Strength
    value, especially at higher Inpaint Replace values.
- **Infill Method**: Invoke currently supports two methods for producing RGB
  values for use in the Outpainting process: Patchmatch and Tile. We believe
  that Patchmatch is the superior method; however, we provide support for Tile in
  case Patchmatch cannot be installed or is unavailable on your computer.
- **Tile Size**: The Tile method for Outpainting sources small portions of the
  original image and randomly places these into the areas being Outpainted. This
  value sets the size of those tiles.
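
As a rough illustration of the Inpaint Replace blend described in the list above
(a hedged sketch, not InvokeAI's implementation; working directly on a plain RGB
array is an assumption made for clarity):

```python
import numpy as np

def inpaint_replace_blend(original: np.ndarray, inpaint_replace: float,
                          rng: np.random.Generator | None = None) -> np.ndarray:
    """Blend random noise into the pixels that seed the inpainting pass.

    At inpaint_replace == 0 the original values pass through unchanged;
    at inpaint_replace == 1 they are completely replaced by noise.
    """
    if rng is None:
        rng = np.random.default_rng()
    noise = rng.random(original.shape)
    return (1.0 - inpaint_replace) * original + inpaint_replace * noise
```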

## Hot Keys

The Unified Canvas is a tool that excels when you use hotkeys. You can view the
full list of keyboard shortcuts, updated with all new features, by clicking the
Keyboard Shortcuts icon at the top right of the InvokeAI WebUI.

The WebGUI is under rapid development. Check back regularly for updates!

| `--cors [CORS ...]` | Additional allowed origins, comma-separated |
| `--host HOST` | Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network. |
| `--port PORT` | Web server: Port to listen on |
| `--certfile CERTFILE` | Web server: Path to certificate file to use for SSL. Use together with --keyfile |
| `--keyfile KEYFILE` | Web server: Path to private key file to use for SSL. Use together with --certfile |
| `--gui` | Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask to create a desktop app experience of the webserver. |
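
The rows above describe the web server's network options. For example, to serve
the Web UI to other machines on your network over HTTPS (a sketch only;
`cert.pem` and `key.pem` stand in for whatever certificate and key files you
actually have):

```bash
python scripts/invoke.py --web --host 0.0.0.0 --port 9090 \
    --certfile cert.pem --keyfile key.pem
```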

### Web Specific Features

# :material-keyboard: **WebUI Hotkey List**

## App Hotkeys

| Setting           | Hotkey             |
| ----------------- | ------------------ |
| ++"Ctrl\+Enter"++ | Invoke             |
| ++"Shift\+X"++    | Cancel             |
| ++"Alt\+A"++      | Focus Prompt       |
| ++"O"++           | Toggle Options     |
| ++"Shift\+O"++    | Pin Options        |
| ++"Z"++           | Toggle Viewer      |
| ++"G"++           | Toggle Gallery     |
| ++"F"++           | Maximize Workspace |
| ++"1-5"++         | Change Tabs        |
| ++"`"++           | Toggle Console     |

## General Hotkeys

| Setting        | Hotkey                 |
| -------------- | ---------------------- |
| ++"P"++        | Set Prompt             |
| ++"S"++        | Set Seed               |
| ++"A"++        | Set Parameters         |
| ++"Shift\+R"++ | Restore Faces          |
| ++"Shift\+U"++ | Upscale                |
| ++"I"++        | Show Info              |
| ++"Shift\+I"++ | Send To Image To Image |
| ++"Del"++      | Delete Image           |
| ++"Esc"++      | Close Panels           |

## Gallery Hotkeys

| Setting           | Hotkey                      |
| ----------------- | --------------------------- |
| ++"Arrow Left"++  | Previous Image              |
| ++"Arrow Right"++ | Next Image                  |
| ++"Shift\+G"++    | Toggle Gallery Pin          |
| ++"Shift\+Up"++   | Increase Gallery Image Size |
| ++"Shift\+Down"++ | Decrease Gallery Image Size |

## Unified Canvas Hotkeys

| Setting                        | Hotkey                 |
| ------------------------------ | ---------------------- |
| ++"B"++                        | Select Brush           |
| ++"E"++                        | Select Eraser          |
| ++"["++                        | Decrease Brush Size    |
| ++"]"++                        | Increase Brush Size    |
| ++"Shift\+["++                 | Decrease Brush Opacity |
| ++"Shift\+]"++                 | Increase Brush Opacity |
| ++"V"++                        | Move Tool              |
| ++"Shift\+F"++                 | Fill Bounding Box      |
| ++"Delete/Backspace"++         | Erase Bounding Box     |
| ++"C"++                        | Select Color Picker    |
| ++"N"++                        | Toggle Snap            |
| ++"Hold Space"++               | Quick Toggle Move      |
| ++"Q"++                        | Toggle Layer           |
| ++"Shift\+C"++                 | Clear Mask             |
| ++"H"++                        | Hide Mask              |
| ++"Shift\+H"++                 | Show/Hide Bounding Box |
| ++"Shift\+M"++                 | Merge Visible          |
| ++"Shift\+S"++                 | Save To Gallery        |
| ++"Ctrl\+C"++                  | Copy To Clipboard      |
| ++"Shift\+D"++                 | Download Image         |
| ++"Ctrl\+Z"++                  | Undo                   |
| ++"Ctrl\+Y / Ctrl\+Shift\+Z"++ | Redo                   |
| ++"R"++                        | Reset View             |
| ++"Arrow Left"++               | Previous Staging Image |
| ++"Arrow Right"++              | Next Staging Image     |
| ++"Enter"++                    | Accept Staging Image   |

docs/features/index.md (new file, 5 lines)

---
title: Overview
---

Here you can find the documentation for different features.

docs/index.md

The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as:

```bash
pip install -r docs/requirements-mkdocs.txt
mkdocs serve
```
-->

<div align="center" markdown>

[](https://github.com/invoke-ai/InvokeAI)

[![discord badge]][discord link]

features and options to aid the image generation process. It runs on Windows,
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.

**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>]
[<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a
href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a
href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas &
Q&A</a>]

<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>

## :octicons-package-dependencies-24: Installation

This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

First time users, please see
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative installation and
upgrade instructions, please see:
[InvokeAI Installation Overview](installation/)

Linux users who wish to make use of the PyPatchMatch inpainting functions will
need to perform a bit of extra work to enable this module. Instructions can be
found at [Installing PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).

## :fontawesome-solid-computer: Hardware Requirements

You will need one of the following:

- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
  only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

We do **not recommend** the following video cards due to issues with their
running in half-precision mode and having insufficient VRAM to render 512x512
images in full-precision mode:

- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory

- At least 12 GB Main Memory RAM.

### :fontawesome-regular-hard-drive: Disk

- At least 18 GB of free disk space for the machine learning model, Python, and
  all its dependencies.

!!! info

    Precision is auto configured based on the device. If however you encounter errors like
    `expected type Float but found Half` or `not implemented for Half` you can try starting
    `invoke.py` with the `--precision=float32` flag:

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
```

## :octicons-gift-24: InvokeAI Features

- [The InvokeAI Web Interface](features/WEB.md) -
  [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
  [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
- [The Command Line Interface](features/CLI.md) -
  [Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
  [Outpainting](features/OUTPAINTING.md) -
  [Adding custom styles and subjects](features/CONCEPTS.md) -
  [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
<!-- separator -->
- [Generating Variations](features/VARIATIONS.md)
<!-- separator -->
- [Prompt Engineering](features/PROMPTS.md)
<!-- separator -->
- Miscellaneous
  - [NSFW Checker](features/NSFW.md)
  - [Embiggen upscaling](features/EMBIGGEN.md)
  - [Other](features/OTHER.md)

## :octicons-log-16: Latest Changes

### v2.2.4 <small>(11 December 2022)</small>

#### the `invokeai` directory

Previously there were two directories to worry about, the directory that
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
directory that contained the models files, embeddings, configuration and
outputs. With the 2.2.4 release, this dual system is done away with, and
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
live in a directory named `invokeai`. By default this directory is located in
your home directory (e.g. `\Users\yourname` on Windows), but you can select
where it goes at install time.

After installation, you can delete the install directory (the one that the zip
file creates when it unpacks). Do **not** delete or move the `invokeai`
directory!

##### Initialization file `invokeai/invokeai.init`

You can place frequently-used startup options in this file, such as the default
number of steps or your preferred sampler. To keep everything in one place, this
file has now been moved into the `invokeai` directory and is named
`invokeai.init`.

#### To update from Version 2.2.3

The easiest route is to download and unpack one of the 2.2.4 installer files.
When it asks you for the location of the `invokeai` runtime directory, respond
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
and answer "Y" when asked if you want to reuse the directory.

The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
does not know about the new directory layout and won't be fully functional.

#### To update to 2.2.5 (and beyond) there's now an update path.

As they become available, you can update to more recent versions of InvokeAI
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
Running it without any arguments will install the most recent version of
InvokeAI. Alternatively, you can get specific releases by running the `update.sh`
script with an argument in the command shell. This syntax accepts the path to
the desired release's zip file, which you can find by clicking on the green
"Code" button on this repository's home page.
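
For example (a sketch only; the URL is hypothetical, so substitute the zip link
of the release you actually want):

```bash
cd invokeai
./update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip   # on Windows: update.bat
```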

#### Other 2.2.4 Improvements

- Fix InvokeAI GUI initialization by @addianto in #1687
- fix link in documentation by @lstein in #1728
- Fix broken link by @ShawnZhong in #1736
- Remove reference to binary installer by @lstein in #1731
- documentation fixes for 2.2.3 by @lstein in #1740
- Modify installer links to point closer to the source installer by @ebr in #1745
- add documentation warning about 1650/60 cards by @lstein in #1753
- Fix Linux source URL in installation docs by @andybearman in #1756
- Make install instructions discoverable in readme by @damian0815 in #1752
- typo fix by @ofirkris in #1755
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte in #1765
- stability and usage improvements to binary & source installers by @lstein in #1760
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
- invoke script cds to its location before running by @lstein in #1805
- Make PaperCut and VoxelArt models load again by @lstein in #1730
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in #1817
- Clean up readme by @hipsterusername in #1820
- Optimized Docker build with support for external working directory by @ebr in #1544
- disable pushing the cloud container by @mauwii in #1831
- Fix docker push github action and expand with additional metadata by @ebr in #1837
- Fix Broken Link To Notebook by @VedantMadane in #1821
- Account for flat models by @spezialspezial in #1766
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by @SammCheese in #1848
- Make force free GPU memory work in img2img by @addianto in #1844
- New installer by @lstein

For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.

## :material-target: Troubleshooting

docs/installation/010_INSTALL_AUTOMATED.md (new file, 315 lines)

---
title: Installing with the Automated Installer
---

# InvokeAI Automated Installation

## Introduction

The automated installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.

## Walk through

1.  Make sure that your system meets the
    [hardware requirements](../index.md#hardware-requirements) and has the
    appropriate GPU drivers installed. In particular, if you are a Linux user
    with an AMD GPU installed, you may need to install the
    [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

    !!! info "Required Space"

        Installation requires roughly 18G of free disk space to load the libraries and
        recommended model weights files.

        Regardless of your destination disk, your *system drive* (`C:\` on Windows, `/` on
        macOS/Linux) requires at least 6GB of free disk space to download and cache python
        dependencies. NOTE for Linux users: if your temporary directory is mounted as a
        `tmpfs`, ensure it has sufficient space.

2.  Check that your system has an up-to-date Python installed. To do this, open
    up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
    "Powershell" on Windows) and type `python --version`. If Python is
    installed, it will print out the version number. If it is version `3.9.1` or
    higher, you meet requirements.

    !!! warning "If you see an older version, or get a command not found error"

        Go to [Python Downloads](https://www.python.org/downloads/) and
        download the appropriate installer package for your platform. We recommend
        [Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
        which has been extensively tested with InvokeAI.

    !!! warning "At this time we do not recommend Python 3.11"

    _Please select your platform in the section below for platform-specific
    setup requirements._

    === "Windows users"

        - During the Python configuration process,
          look out for a checkbox to add Python to your PATH
          and select it. If the install script complains that it can't
          find python, then open the Python installer again and choose
          "Modify" existing installation.

        - Installation requires an up to date version of the Microsoft Visual C libraries.
          Please install the 2015-2022 libraries available here:
          https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170

    === "Mac users"

        - After installing Python, you may need to run the
          following command from the Terminal in order to install the Web
          certificates needed to download model data from https sites. If
          you see lots of CERTIFICATE ERRORS during the last part of the
          install, this is the problem, and you can fix it with this command:

          `/Applications/Python\ 3.10/Install\ Certificates.command`

        - You may need to install the Xcode command line tools. These
          are a set of tools that are needed to run certain applications in a
          Terminal, including InvokeAI. This package is provided directly by Apple.

        - To install, open a terminal window and run `xcode-select --install`.
          You will get a macOS system popup guiding you through the
          install. If you already have them installed, you will instead see some
          output in the Terminal advising you that the tools are already installed.

        - More information can be found here:
          https://www.freecodecamp.org/news/install-xcode-command-line-tools/

    === "Linux users"

        For reasons that are not entirely clear, installing the correct version of
        Python can be a bit of a challenge on Ubuntu, Linux Mint, Pop!_OS, and other
        Debian-derived distributions.

        On Ubuntu 22.04 and higher, run the following:

        ```
        sudo apt update
        sudo apt install -y python3 python3-pip python3-venv
        sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
        ```

        On Ubuntu 20.04, the process is slightly different:

        ```
        sudo apt update
        sudo apt install -y software-properties-common
        sudo add-apt-repository -y ppa:deadsnakes/ppa
        sudo apt install python3.10 python3-pip python3.10-venv
        sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
        ```

        Both `python` and `python3` commands are now pointing at Python3.10. You can
        still access older versions of Python by calling `python2`, `python3.8`, etc.

        Linux systems require a couple of additional graphics libraries to be installed
        for proper functioning of `python3-opencv`. Please run the following:

        `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`

3.  The source installer is distributed in ZIP files. Go to the
    [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
    look for a series of files named:

    - [InvokeAI-installer-2.2.4-p5-mac.zip](https://github.com/invoke-ai/InvokeAI/files/10254728/InvokeAI-installer-2.2.4-p5-mac.zip)
    - [InvokeAI-installer-2.2.4-p5-windows.zip](https://github.com/invoke-ai/InvokeAI/files/10254729/InvokeAI-installer-2.2.4-p5-windows.zip)
    - [InvokeAI-installer-2.2.4-p5-linux.zip](https://github.com/invoke-ai/InvokeAI/files/10254727/InvokeAI-installer-2.2.4-p5-linux.zip)

    Download the one that is appropriate for your operating system.

4.  Unpack the zip file into a convenient directory. This will create a new
    directory named "InvokeAI-Installer". This example shows how this would look
    using the `unzip` command-line tool, but you may use any graphical or
    command-line Zip extractor:

    ```cmd
    C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
    Archive: C:\Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
      creating: InvokeAI-Installer\
     inflating: InvokeAI-Installer\install.bat
     inflating: InvokeAI-Installer\readme.txt
    ...
    ```

    After successful installation, you can delete the `InvokeAI-Installer`
    directory.

5.  **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
    accept the dialog box that asks you if you wish to modify your registry.
    This activates long filename support on your system and will prevent
    mysterious errors during installation.

6.  If you are using a desktop GUI, double-click the installer file. It will be
    named `install.bat` on Windows systems and `install.sh` on Linux and
    Macintosh systems.

    On Windows systems you will probably get an "Untrusted Publisher" warning.
    Click on "More Info" and select "Run Anyway." You trust us, right?

7.  Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd InvokeAI-Installer
    C:\Documents\Linco\InvokeAI-Installer> install.bat
    ```

8.  The script will ask you to choose where to install InvokeAI. Select a
    directory with at least 18G of free space for a full install. InvokeAI and
    all its support files will be installed into a new directory named
    `invokeai` located at the location you specify.

    - The default is to install the `invokeai` directory in your home directory,
      usually `C:\Users\YourName\invokeai` on Windows systems,
      `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
      on Macintoshes, where "YourName" is your login name.

    - The script uses tab autocompletion to suggest directory path completions.
      Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
      to suggest completions.

9.  Sit back and let the install script work. It will install the third-party
    libraries needed by InvokeAI, then download the current InvokeAI release and
    install it.

    Be aware that some of the library download and install steps take a long
    time. In particular, the `pytorch` package is quite large and often appears
    to get "stuck" at 99.9%. Have patience and the installation step will
    eventually resume. However, there are occasions when the library install
    does legitimately get stuck. If you have been waiting for more than ten
    minutes and nothing is happening, you can interrupt the script with ^C. You
    may restart it and it will pick up where it left off.

10. After installation completes, the installer will launch a script called
    `configure_invokeai.py`, which will guide you through the first-time process
    of selecting one or more Stable Diffusion model weights files, downloading
    and configuring them. We provide a list of popular models that InvokeAI
    performs well with. However, you can add more weight files later on using
    the command-line client or the Web UI. See
    [Installing Models](050_INSTALLING_MODELS.md) for details.

    Note that the main Stable Diffusion weights file is protected by a license
    agreement that you must agree to in order to use. The script will list the
    steps you need to take to create an account on the official site that hosts
    the weights files, accept the agreement, and provide an access token that
    allows InvokeAI to legally download and install the weights files.

    If you have already downloaded the weights file(s) for another Stable
    Diffusion distribution, you may skip this step (by selecting "skip" when
    prompted) and configure InvokeAI to use the previously-downloaded files. The
    process for this is described in [Installing Models](050_INSTALLING_MODELS.md).

11. The script will now exit and you'll be ready to generate some images. Look
    for the directory `invokeai` installed in the location you chose at the
    beginning of the install session. Look for a shell script named `invoke.sh`
    (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
    it or typing its name at the command-line:

    ```cmd
    C:\Documents\Linco> cd invokeai
    C:\Documents\Linco\invokeai> invoke.bat
    ```

    - The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
      (1) the command-line interface, or (2) the web GUI. If you start the
      latter, you can load the user interface by pointing your browser at
      http://localhost:9090.

    - The script also offers you a third option labeled "open the developer
      console". If you choose this option, you will be dropped into a
      command-line interface in which you can run python commands directly,
      access developer tools, and launch InvokeAI with customized options.

12. You can launch InvokeAI with several different command-line arguments that
    customize its behavior. For example, you can change the location of the
    image output directory, or select your favorite sampler. See the
    [Command-Line Interface](../features/CLI.md) for a full list of the options.

    - To set defaults that will take effect every time you launch InvokeAI,
      use a text editor (e.g. Notepad) to edit the file
      `invokeai\invokeai.init`. It contains a variety of examples that you can
      follow to add and modify launch options. A sketch of what such a file
      might contain is shown below.
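
As an illustrative sketch only (the `--web`, `--host`, `--port`, and
`--precision` options appear elsewhere in these docs; `--outdir` and all of the
values shown here are assumptions, so adjust them to your own setup):

```
# invokeai.init - one or more startup options, written as you would type them on the command line
--web
--host=0.0.0.0
--port=9090
--precision=float32
--outdir=/home/YourName/invokeai/outputs
```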

!!! warning "The `invokeai` directory contains the `invoke` application, its
    configuration files, the model weight files, and outputs of image generation.
    Once InvokeAI is installed, do not move or remove this directory."

## Troubleshooting

### _Package dependency conflicts_

If you have previously installed InvokeAI or another Stable Diffusion package,
the installer may occasionally pick up outdated libraries and either the
installer or `invoke` will fail with complaints about library conflicts. You can
address this by entering the `invokeai` directory and running `update.sh`, which
will bring InvokeAI up to date with the latest libraries.
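
In other words, a minimal sketch of the fix just described:

```bash
cd invokeai
./update.sh        # on Windows: update.bat
```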
|
### ldm from pypi
|
||||||
|
|
||||||
|
!!! warning
|
||||||
|
|
||||||
|
Some users have tried to correct dependency problems by installing
|
||||||
|
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
|
||||||
|
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
|
||||||
|
ldm will make matters worse. If you've installed ldm, uninstall it with
|
||||||
|
`pip uninstall ldm`.
|
||||||
|
|
||||||
|

### Corrupted configuration file

Everything seems to install ok, but `invoke` complains of a corrupted
configuration file and goes back into the configuration process (asking you to
download models, etc), but this doesn't fix the problem.

This issue is often caused by a misconfigured configuration directive in the
`invokeai\invokeai.init` initialization file that contains startup settings. The
easiest way to fix the problem is to move the file out of the way and re-run
`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
script) and run this command:

```cmd
configure_invokeai.py --root=.
```

Note the dot (.) after `--root`. It is part of the command.
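
For example, assuming the developer's console opens in the `invokeai` runtime
directory (as the `--root=.` above implies), one way to set the old file aside
and regenerate it is the following; Windows users would use `move` instead of
`mv`, and the `.orig` suffix is arbitrary:

```bash
mv invokeai.init invokeai.init.orig   # keep the old file around for reference
configure_invokeai.py --root=.        # recreate a fresh configuration
```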

_If none of these maneuvers fixes the problem_ then please report the problem to
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
assistance.

### Other problems

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.

## Updating to newer versions

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `configure_invokeai` script to download any updated
model files that may be needed. You can also use this to add additional models
that you did not select at installation time.

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `configure_invokeai.py`. This happens relatively infrequently. To do
this, simply open up the developer's console again and type
`python scripts/configure_invokeai.py`.

You may also use the `update` script to install any selected version of
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
link of the version you wish to install. You can find the zip links by going to
one of the release pages and looking for the **Assets** section at the
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
big code directory on the InvokeAI welcome page. When you find the version you
want to install, go to the green "<> Code" button at the top, and copy the
"Download ZIP" link.

Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
version as its argument. For example, this will install the old 2.2.0 release:

```cmd
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
```

docs/installation/020_INSTALL_MANUAL.md (new file, 589 lines)

---
title: Installing Manually
---

<figure markdown>
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
</figure>

!!! warning "This is for advanced Users"

who are already experienced with using conda or pip

## Introduction

You have two choices for manual installation: the [first
one](#PIP_method) uses basic Python virtual environment (`venv`)
commands and the PIP package manager. The [second one](#Conda_method) is
based on the Anaconda3 package manager (`conda`). Both methods require
you to enter commands on the terminal, also known as the "console".

Note that the conda install method is currently deprecated and will no
longer be supported at some point in the future.

On Windows systems you are encouraged to install and use
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice
features such as command-line completion.

## pip Install

To install InvokeAI with virtual environments and the PIP package
manager, please follow these steps:

1. Make sure you are using Python 3.9 or 3.10. The rest of the install
procedure depends on this:

```bash
python -V
```

2. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
GitHub:

```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```

This will create an InvokeAI folder where you will follow the rest of the
steps.

3. From within the InvokeAI top-level directory, create and activate a virtual
environment named `invokeai`:

```bash
python -mvenv invokeai
source invokeai/bin/activate
```
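
On Windows the equivalent activation step uses the `Scripts` folder rather than
`bin`; a minimal sketch, run from the same top-level directory, would be:

```cmd
python -mvenv invokeai
invokeai\Scripts\activate
```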

4. Make sure that pip is installed in your virtual environment and up to date:

```bash
python -mensurepip --upgrade
python -mpip install --upgrade pip
```

5. Pick the correct `requirements*.txt` file for your hardware and operating
system.

We have created a series of environment files suited for different operating
systems and GPU hardware. They are located in the
`environments-and-requirements` directory:

<figure markdown>

| filename | OS |
| :---: | :---: |
| requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU |
| requirements-lin-arm64.txt | Linux running on arm64 systems |
| requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU |
| requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration |
| requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |

</figure>

Select the appropriate requirements file, and make a link to it from
`requirements.txt` in the top-level InvokeAI directory. The command to do
this from the top-level directory is:

!!! example ""

=== "Macintosh and Linux"

!!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."

```bash
ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
```

=== "Windows"

!!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"

```cmd
copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
```

!!! warning

Please do not link or copy `environments-and-requirements/requirements-base.txt`.
This is a base requirements file that does not have the platform-specific
libraries. Also, be sure to link or copy the platform-specific file to
a top-level file named `requirements.txt` as shown here. Running pip on
a requirements file in a subdirectory will not work as expected.

When this is done, confirm that a file named `requirements.txt` has been
created in the InvokeAI root directory and that it points to the correct
file in `environments-and-requirements`.
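
A quick way to verify the link (or copy) on Linux and Mac is to list the file
and check its target:

```bash
ls -l requirements.txt   # should point at (or be a copy of) the requirements file you selected
```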

6. Run PIP

Be sure that the `invokeai` environment is active before doing this:

```bash
pip install --prefer-binary -r requirements.txt
```

7. Set up the runtime directory

In this step you will initialize a runtime directory that will
contain the models, model config files, directory for textual
inversion embeddings, and your outputs. This keeps the runtime
directory separate from the source code and aids in updating.

You may pick any location for this directory using the `--root_dir`
option (abbreviated --root). If you don't pass this option, it will
default to `invokeai` in your home directory.

```bash
configure_invokeai.py --root_dir ~/Programs/invokeai
```

The script `configure_invokeai.py` will interactively guide you through the
process of downloading and installing the weights files needed for InvokeAI.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you have to agree to. The script will list the steps you need
to take to create an account on the site that hosts the weights files,
accept the agreement, and provide an access token that allows InvokeAI to
legally download and install the weights files.

If you get an error message about a module not being installed, check that
the `invokeai` environment is active and if not, repeat step 5.

Note that `configure_invokeai.py` and `invoke.py` should be installed
under your virtual environment directory and the system should find them
on the PATH. If this isn't working on your system, you can call the
scripts directly using `python scripts/configure_invokeai.py` and
`python scripts/invoke.py`.

!!! tip

If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).

8. Run the command-line or the web interface:

Activate the environment (with `source invokeai/bin/activate`), and then
run the script `invoke.py`. If you selected a non-default location
for the runtime directory, please specify the path with the `--root_dir`
option (abbreviated below as `--root`):

!!! example ""

!!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"

=== "CLI"

```bash
invoke.py --root ~/Programs/invokeai
```

=== "local Webserver"

```bash
invoke.py --web --root ~/Programs/invokeai
```

=== "Public Webserver"

```bash
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
```

If you choose to run the web interface, point your browser at
http://localhost:9090 in order to load the GUI.

!!! tip

You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory.
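
For example, on Linux or Mac you might add a line like the following to your
shell startup file (`~/.bashrc`, `~/.zshrc`, or similar); the path is simply the
runtime directory chosen earlier:

```bash
export INVOKEAI_ROOT=~/Programs/invokeai
```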

9. Render away!

Browse the [features](../features/CLI.md) section to learn about all the things you
can do with InvokeAI.

Note that some GPUs are slow to warm up. In particular, when using an AMD
card with the ROCm driver, you may have to wait for over a minute the first
time you try to generate an image. Fortunately, after the warm up period
rendering will be fast.

10. Subsequently, to relaunch the script, be sure to activate the `invokeai`
virtual environment (with `source invokeai/bin/activate`), enter the `InvokeAI`
directory, and then launch the invoke script. If you forget to activate the
'invokeai' environment, the script will fail with multiple `ModuleNotFound`
errors.

!!! tip

Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.

---

### Conda method

1. Check that your system meets the
[hardware requirements](index.md#Hardware_Requirements) and has the
appropriate GPU drivers installed. In particular, if you are a Linux user
with an AMD GPU installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
of ROCm driver support on this platform.

To confirm that the appropriate drivers are installed, run `nvidia-smi` on
NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
information about the installed video card.

Macintosh users with MPS acceleration, or anybody with a CPU-only system,
can skip this step.

2. You will need to install Anaconda3 and Git if they are not already
available. Use your operating system's preferred package manager, or
download the installers manually. You can find them here:

- [Anaconda3](https://www.anaconda.com/)
- [git](https://git-scm.com/downloads)

3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
GitHub:

```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```

This will create an InvokeAI folder where you will follow the rest of the
steps.

4. Enter the newly-created InvokeAI folder:

```bash
cd InvokeAI
```

From this step forward make sure that you are working in the InvokeAI
directory!

5. Select the appropriate environment file:

We have created a series of environment files suited for different operating
systems and GPU hardware. They are located in the
`environments-and-requirements` directory:

<figure markdown>

| filename | OS |
| :---: | :---: |
| environment-lin-amd.yml | Linux with an AMD (ROCm) GPU |
| environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU |
| environment-mac.yml | Macintosh |
| environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |

</figure>

Choose the appropriate environment file for your system and link or copy it
to `environment.yml` in InvokeAI's top-level directory. To do so, run the
following command from the repository root:

!!! Example ""

=== "Macintosh and Linux"

!!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"

```bash
ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
```

When this is done, confirm that a file `environment.yml` has been linked in
the InvokeAI root directory and that it points to the correct file in the
`environments-and-requirements` directory.

```bash
ls -la
```

=== "Windows"

!!! todo "Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"

```cmd
copy environments-and-requirements\environment-win-cuda.yml environment.yml
```

Afterwards verify that the file `environment.yml` has been created, either via the
explorer or by using the command `dir` from the terminal:

```cmd
dir
```

!!! warning "Do not try to run conda directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."

6. Create the conda environment:

```bash
conda env update
```

This will create a new environment named `invokeai` and install all InvokeAI
dependencies into it. If something goes wrong you should take a look at
[troubleshooting](#troubleshooting).

7. Activate the `invokeai` environment:

In order to use the newly created environment you will first need to
activate it:

```bash
conda activate invokeai
```

Your command-line prompt should change to indicate that `invokeai` is active
by prepending `(invokeai)`.

8. Set up the runtime directory

In this step you will initialize a runtime directory that will
contain the models, model config files, directory for textual
inversion embeddings, and your outputs. This keeps the runtime
directory separate from the source code and aids in updating.

You may pick any location for this directory using the `--root_dir`
option (abbreviated --root). If you don't pass this option, it will
default to `invokeai` in your home directory.

```bash
python scripts/configure_invokeai.py --root_dir ~/Programs/invokeai
```

The script `configure_invokeai.py` will interactively guide you through the
process of downloading and installing the weights files needed for InvokeAI.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you have to agree to. The script will list the steps you need
to take to create an account on the site that hosts the weights files,
accept the agreement, and provide an access token that allows InvokeAI to
legally download and install the weights files.

If you get an error message about a module not being installed, check that
the `invokeai` environment is active and if not, repeat step 5.

Note that `configure_invokeai.py` and `invoke.py` should be
installed under your conda directory and the system should find
them automatically on the PATH. If this isn't working on your
system, you can call the scripts directly using
`python scripts/configure_invokeai.py` and `python scripts/invoke.py`.

!!! tip

If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).

9. Run the command-line or the web interface:

Activate the environment (with `conda activate invokeai`), and then
run the script `invoke.py`. If you selected a non-default location
for the runtime directory, please specify the path with the `--root_dir`
option (abbreviated below as `--root`):

!!! example ""

!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"

=== "CLI"

```bash
invoke.py --root ~/Programs/invokeai
```

=== "local Webserver"

```bash
invoke.py --web --root ~/Programs/invokeai
```

=== "Public Webserver"

```bash
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
```

If you choose to run the web interface, point your browser at
http://localhost:9090 in order to load the GUI.

!!! tip

You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of your choice.

10. Render away!

Browse the [features](../features/CLI.md) section to learn about all the things you
can do with InvokeAI.

Note that some GPUs are slow to warm up. In particular, when using an AMD
card with the ROCm driver, you may have to wait for over a minute the first
time you try to generate an image. Fortunately, after the warm up period
rendering will be fast.

11. Subsequently, to relaunch the script, be sure to run "conda activate
invokeai", enter the `InvokeAI` directory, and then launch the invoke
script. If you forget to activate the 'invokeai' environment, the script
will fail with multiple `ModuleNotFound` errors.

## Creating an "install" version of InvokeAI

If you wish you can install InvokeAI and all its dependencies in the
runtime directory. This allows you to delete the source code
repository and eliminates the need to provide `--root_dir` at startup
time. Note that this method only works with the PIP method. A consolidated
sketch of the whole sequence appears after the numbered steps below.

1. Follow the instructions for the PIP install, but in step #2 put the
virtual environment into the runtime directory. For example, assuming the
runtime directory lives in `~/Programs/invokeai`, you'd run:

```bash
python -mvenv ~/Programs/invokeai
```

2. Now follow steps 3 to 5 in the PIP recipe, ending with the `pip install`
step.

3. Run one additional step while you are in the source code repository
directory: `pip install .` (note the dot at the end).

4. That's all! Now, whenever you activate the virtual environment,
`invoke.py` will know where to look for the runtime directory without
needing a `--root_dir` argument. In addition, you can now move or
delete the source code repository entirely.

(Don't move the runtime directory!)
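
Putting the pieces together, the full sequence might look like the following
sketch. The paths and the requirements file name are placeholders; pick the
ones appropriate for your system as described in the PIP recipe:

```bash
# create the virtual environment inside the runtime directory and activate it
python -mvenv ~/Programs/invokeai
source ~/Programs/invokeai/bin/activate

# from the top-level InvokeAI source directory, select requirements and install
ln -sf environments-and-requirements/requirements-lin-cuda.txt requirements.txt
pip install --prefer-binary -r requirements.txt
pip install .   # install InvokeAI itself; invoke.py will then find the runtime dir without --root_dir
```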

## Updating to newer versions of the script

This distribution is changing rapidly. If you used the `git clone` method
(step 5) to download the InvokeAI directory, then to update to the latest and
greatest version, launch the Anaconda window, enter `InvokeAI` and type:

```bash
git pull
conda env update
python scripts/configure_invokeai.py --skip-sd-weights #optional
```

This will bring your local copy into sync with the remote one. The last step may
be needed to take advantage of new features or released models. The
`--skip-sd-weights` flag will prevent the script from prompting you to download
the big Stable Diffusion weights files.

## Troubleshooting

Here are some common issues and their suggested solutions.

### Conda

#### Conda fails before completing `conda update`

The usual source of these errors is a package incompatibility. While we have
tried to minimize these, over time packages get updated and sometimes introduce
incompatibilities.

We suggest that you search
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).

You may also try to install the broken packages manually using PIP. To do this,
activate the `invokeai` environment, and run `pip install` with the name and
version of the package that is causing the incompatibility. For example:

```bash
pip install test-tube==0.7.5
```

You can keep doing this until all requirements are satisfied and the `invoke.py`
script runs without errors. Please report to
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
to work around the problem so that others can benefit from your investigation.

#### Create Conda Environment fails on MacOS

If creating the conda environment fails with an lmdb error, this is most likely caused by Clang.
Run `brew config` to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error.
Start by installing additional XCode command line tools, followed by `brew install llvm`:

```bash
xcode-select --install
brew install llvm
```

If `brew config` shows that Clang is installed, update to the latest llvm and try creating the environment again.

#### `configure_invokeai.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.

If the problem persists, a more extreme measure is to clear Conda's caches and
remove the `invokeai` environment:

```bash
conda deactivate
conda env remove -n invokeai
conda clean -a
conda update
```

This removes all cached library files, including ones that may have been
corrupted somehow. (This is not supposed to happen, but does anyway.)

#### `invoke.py` crashes at a later stage

If the CLI or web site had been working ok, but something unexpected happens
later on during the session, you've encountered a code bug that is probably
unrelated to an install issue. Please search
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).

#### My renders are running very slowly

You may have installed the wrong torch (machine learning) package, and the
system is running on CPU rather than the GPU. To check, look at the log messages
that appear when `invoke.py` is first starting up. One of the earlier lines
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
and on Macintoshes, it should say "mps". If instead the message says it is
running on "cpu", then you may need to install the correct torch library.
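
As a supplementary check on an NVIDIA/CUDA system, you can ask torch directly
whether it sees the GPU from inside the active environment:

```bash
python -c "import torch; print(torch.cuda.is_available())"   # True means the CUDA build of torch is working
```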

You may be able to fix this by installing a different torch library. Here are
the magic incantations for Conda and PIP.

!!! todo "For CUDA systems"

- conda

```bash
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
```

- pip

```bash
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
```

!!! todo "For AMD systems"

- conda

```bash
conda activate invokeai
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
```

- pip

```bash
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
```

More information and troubleshooting tips can be found at https://pytorch.org.

@ -1,12 +1,12 @@
---
title: Installing with Docker
---

# :fontawesome-brands-docker: Docker

!!! warning "For end users"

We highly recommend installing InvokeAI locally using [these instructions](index.md)

!!! tip "For developers"

@ -16,6 +16,10 @@ title: Docker
For general use, install locally to leverage your machine's GPU.

!!! tip "For running on a cloud instance/service"

Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below

## Why containers?

They provide a flexible, reliable way to build and deploy InvokeAI. You'll also

@ -36,7 +40,7 @@ development purposes it's fine. Once you're done with development tasks on your
laptop you can build for the target platform and architecture and deploy to
another environment with NVIDIA GPUs on-premises or in the cloud.

## Installation in a Linux container (desktop)

### Prerequisites

@ -72,14 +76,20 @@ created in the last step.

Some suggestions of variables you may want to change besides the Token:

<figure markdown>

| Environment-Variable | Default value | Description |
| --- | --- | --- |
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
| `ARCH` | arch of the build machine | can be changed if you want to build the image for another arch |
| `INVOKEAI_TAG` | latest | the Container Repository / Tag which will be used |
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
| `CONTAINER_FLAVOR` | cuda | the flavor of the image, which can be changed if you build e.g. with the AMD requirements file |
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |

</figure>

#### Build the Image

@ -106,15 +116,115 @@ When used without arguments, the container will start the webserver and provide
you the link to open it. But if you want to use some other parameters you can
also do so.

!!! example "run script example"

```bash
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
```

This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.

Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)

---

## Running the container on your GPU

If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
environment variable to enable GPU usage and have the process run much faster:

```bash
GPU_FLAGS=all ./docker-build/run.sh
```

This passes the `--gpus all` argument to docker and uses the GPU.

If you don't have a GPU (or your host is not yet set up to use it) you will see a message like this:

`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`

You can use the full set of GPU combinations documented here:

https://docs.docker.com/config/containers/resource_constraints/#gpu

For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to choose a specific device identified by a UUID.

## Running InvokeAI in the cloud with Docker

We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).

An advantage of this method is that it does not need any local setup or additional dependencies.

See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.

### Prerequisites

- a `docker` runtime
- `make` (optional but helps for convenience)
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation

Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.

### Building and running the image locally

1. Clone this repo and `cd docker-build`
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
    - `make configure` (This does *not* require a GPU-capable system)
    - this will create a local cache of models and configs (a.k.a the _runtime dir_)
    - enter your Huggingface token when prompted
1. `make web`
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!

To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.

#### Building and running without `make`

(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).

!!! example "Build the image and configure the runtime directory"

```Shell
cd docker-build

DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..

docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
```

!!! example "Run the web server"

```Shell
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
```

Access the Web UI at http://localhost:9090

!!! example "Run the InvokeAI interactive CLI"

```
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
```

### Running the image in the cloud

This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):

1. build this image either in the cloud (you'll need to pull the repo), or locally
1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub)
1. `docker pull` it on your cloud instance
1. configure the runtime directory as per the above example, using the `docker run ... configure_invokeai.py` command
1. use either one of the `docker run` commands above, substituting the image name for your own image.

To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.

The template's `README` provides ample detail, but at a high level, the process is as follows:

1. create a pod using this Docker image
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
1. Run the pod with `sleep infinity` as the Docker command
1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py`
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!

Running on other cloud providers such as Vast.ai will likely work in a similar fashion.

---

@ -56,7 +56,7 @@ unofficial Stable Diffusion models and where they can be obtained.

There are three ways to install weights files:

1. During InvokeAI installation, the `configure_invokeai.py` script can download
them for you.

2. You can use the command-line interface (CLI) to import, configure and modify
@ -65,13 +65,13 @@ There are three ways to install weights files:
3. You can download the files manually and add the appropriate entries to
`models.yaml`.

### Installation via `configure_invokeai.py`

This is the most automatic way. Run `scripts/configure_invokeai.py` from the
console. It will ask you to select which models to download and lead you through
the steps of setting up a Hugging Face account if you haven't done so already.

To start, run `python scripts/configure_invokeai.py` from within the InvokeAI
directory:

!!! example ""
@ -100,7 +100,7 @@ directory
The original Stable Diffusion version 1.4 weight file (4.27 GB)
Download? [n] n
[4] waifu-diffusion-1.3:
Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
Download? [n] y
[5] ft-mse-improved-autoencoder-840000:
StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB)
@ -162,6 +162,12 @@ the command-line client's `!import_model` command.
Type a bit of the path name and hit ++tab++ in order to get a choice of
possible completions.

!!! tip "on Windows, you can drag model files onto the command-line"

Once you have typed in `!import_model `, you can drag the model `.ckpt` file
onto the command-line to insert the model path. This way, you don't need to
type it or copy/paste.
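
As an illustration only (the file path below is made up), a CLI import could
look roughly like this once the path has been typed or dragged onto the prompt:

```
invoke> !import_model C:\Users\you\Downloads\arabian-nights-1.0.ckpt
```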

4. Follow the wizard's instructions to complete installation as shown in the
example here:

@ -238,7 +244,7 @@ arabian-nights-1.0:
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description | Any description that you want to add to the model to remind you what it is. |
| weights | Relative path to the .ckpt weights file for this model. |
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `configure_invokeai.py` script. |
| vae | If you want to add a VAE file to the model, then enter its path here. |
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |

docs/installation/060_INSTALL_PATCHMATCH.md (new file, 115 lines)

---
title: Installing PyPatchMatch
---

# :octicons-paintbrush-16: Installing PyPatchMatch

pypatchmatch is a Python module for inpainting images. It is not
needed to run InvokeAI, but it greatly improves the quality of
inpainting and outpainting and is recommended.

Unfortunately, it is a C++ optimized module and installation
can be somewhat challenging. This guide leads you through the steps.

## Windows

You're in luck! On Windows platforms PyPatchMatch will install
automatically with no extra intervention.

## Macintosh

PyPatchMatch is not currently supported, but the team is working on
it.

## Linux

Prior to installing PyPatchMatch, you need to take the following
steps:

### Debian Based Distros

1. Install the `build-essential` tools:

```
sudo apt update
sudo apt install build-essential
```

2. Install `opencv`:

```
sudo apt install python3-opencv libopencv-dev
```

3. Fix the naming of the `opencv` package configuration file:

```
cd /usr/lib/x86_64-linux-gnu/pkgconfig/
ln -sf opencv4.pc opencv.pc
```

4. Activate the environment you use for invokeai, either with
`conda` or with a virtual environment.

5. Do a "develop" install of pypatchmatch:

```
pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
```

6. Confirm that pypatchmatch is installed.
At the command-line prompt enter `python`, and
then at the `>>>` line type `from patchmatch import patch_match`.
It should look like the following:

```
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from patchmatch import patch_match
Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
rm -rf build/obj libpatchmatch.so
mkdir: created directory 'build/obj'
mkdir: created directory 'build/obj/csrc/'
[dep] csrc/masked_image.cpp ...
[dep] csrc/nnf.cpp ...
[dep] csrc/inpaint.cpp ...
[dep] csrc/pyinterface.cpp ...
[CC] csrc/pyinterface.cpp ...
[CC] csrc/inpaint.cpp ...
[CC] csrc/nnf.cpp ...
[CC] csrc/masked_image.cpp ...
[link] libpatchmatch.so ...
```

### Arch Based Distros

1. Install the `base-devel` package:

```
sudo pacman -Syu
sudo pacman -S --needed base-devel
```

2. Install `opencv`:

```
sudo pacman -S opencv
```

or for CUDA support

```
sudo pacman -S opencv-cuda
```

3. Fix the naming of the `opencv` package configuration file:

```
cd /usr/lib/pkgconfig/
ln -sf opencv4.pc opencv.pc
```

**Next, follow Steps 4-6 from the Debian section above.**

If you see no errors, then you're ready to go!
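
If you prefer a one-shot, non-interactive check, the same import can be run
directly from the shell (assuming the invokeai environment is active):

```bash
python -c "from patchmatch import patch_match"   # a clean exit (after the one-time compile messages) means the install worked
```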

@ -0,0 +1,89 @@
---
title: build binary installers
---

# :simple-buildkite: How to build "binary" installers (InvokeAI-mac/windows/linux_on_*.zip)

## 1. Ensure `installers/requirements.in` is correct

It should be correct and up to date on the branch to be installed.

## <a name="step-2"></a> 2. Run `pip-compile` on each platform.

On each target platform, in the branch that is to be installed, and
inside the InvokeAI git root folder, run the following commands:

```commandline
conda activate invokeai # or however you activate python
pip install pip-tools
pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/<reqsfile>.txt binary_installer/requirements.in
```

where `<reqsfile>.txt` is whichever of

```commandline
py3.10-darwin-arm64-mps-reqs.txt
py3.10-darwin-x86_64-reqs.txt
py3.10-linux-x86_64-cuda-reqs.txt
py3.10-windows-x86_64-cuda-reqs.txt
```

matches the current OS and architecture.

> There is no way to cross-compile these. They must be done on a system matching the target OS and arch.

## <a name="step-3"></a> 3. Set github repository and branch

Once all reqs files have been collected and committed **to the branch
to be installed**, edit `binary_installer/install.sh.in` and `binary_installer/install.bat.in` so that `RELEASE_URL`
and `RELEASE_SOURCEBALL` point to the github repo and branch that is
to be installed.

For example, to install the `main` branch of `InvokeAI`, they should be
set as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

Or, to install the `damians-cool-feature` branch of `damian0815`, set them
as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/damian0815/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/damian0815/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

The branch and repo specified here **must** contain the correct reqs
files. The installer zip files **do not** contain requirements files,
they are pulled from the specified branch during the installation
process.

## 4. Create zip files.

cd into the `installers/` folder and run
`./create_installers.sh`. This will create
`InvokeAI-mac_on_<branch>.zip`,
`InvokeAI-windows_on_<branch>.zip` and
`InvokeAI-linux_on_<branch>.zip`. These files can be distributed to end users.

These zips will continue to function as installers for all future
pushes to those branches, as long as necessary changes to
`requirements.in` are propagated in a timely manner to the
`py3.10-*-reqs.txt` files using pip-compile as outlined in [step
2](#step-2).

To actually install, users should unzip the appropriate zip file into an empty
folder and run `install.sh` on macOS/Linux or `install.bat` on
Windows.

docs/installation/INSTALL_AUTOMATED.md (new symbolic link → 010_INSTALL_AUTOMATED.md)

@ -1,27 +0,0 @@ (file removed)
---
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

# THIS NEEDS TO BE FLESHED OUT

## Introduction

We have a [Jupyter
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode
(with the appropriate extensions installed)/Jupyter/JupyterLab and
start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

## Walkthrough

## Updating to newer versions

### Updating the stable version

### Updating to the development version

## Troubleshooting

@ -8,7 +8,7 @@ title: Manual Installation

!!! warning "This is for advanced Users"

who are already experienced with using conda or pip

## Introduction

@ -155,10 +155,10 @@ command-line completion.
process for this is described in [here](INSTALLING_MODELS.md).

```bash
python scripts/configure_invokeai.py
```

The script `configure_invokeai.py` will interactively guide you through the
process of downloading and installing the weights files needed for InvokeAI.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you have to agree to. The script will list the steps you need

@ -220,7 +220,7 @@ greatest version, launch the Anaconda window, enter `InvokeAI` and type:
```bash
git pull
conda env update
python scripts/configure_invokeai.py --no-interactive #optional
```

This will bring your local copy into sync with the remote one. The last step may

@ -359,7 +359,7 @@ brew install llvm

If brew config has Clang installed, update to the latest llvm and try creating the environment again.

#### `configure_invokeai.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.
@ -1,156 +0,0 @@
---
title: Source Installer
---

# The InvokeAI Source Installer

## Introduction

The source installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md)

Before you begin, make sure that you meet the
[hardware requirements](index.md#Hardware_Requirements) and has the appropriate
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

## Walk through

Though there are multiple steps, there really is only one click involved to kick
off the process.

1. The source installer is distributed in ZIP files. Go to the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
look for a series of files named:

- invokeAI-src-installer-mac.zip
- invokeAI-src-installer-windows.zip
- invokeAI-src-installer-linux.zip

Download the one that is appropriate for your operating system.

2. Unpack the zip file into a directory that has at least 18G of free space. Do
_not_ unpack into a directory that has an earlier version of InvokeAI.

This will create a new directory named "InvokeAI". This example shows how
this would look using the `unzip` command-line tool, but you may use any
graphical or command-line Zip extractor:

```cmd
C:\Documents\Linco> unzip invokeAI-windows.zip
Archive: C: \Linco\Downloads\invokeAI-linux.zip
creating: invokeAI\
inflating: invokeAI\install.bat
inflating: invokeAI\readme.txt
```

3. If you are using a desktop GUI, double-click the installer file. It will be
named `install.bat` on Windows systems and `install.sh` on Linux and
Macintosh systems.

4. Alternatively, form the command line, run the shell script or .bat file:

```cmd
C:\Documents\Linco> cd invokeAI
C:\Documents\Linco\invokeAI> install.bat
```

5. Sit back and let the install script work. It will install various binary
requirements including Conda, Git and Python, then download the current
InvokeAI code and install it along with its dependencies.

6. After installation completes, the installer will launch a script called
`preload_models.py`, which will guide you through the first-time process of
selecting one or more Stable Diffusion model weights files, downloading and
configuring them.

Note that the main Stable Diffusion weights file is protected by a license
agreement that you must agree to in order to use. The script will list the
steps you need to take to create an account on the official site that hosts
the weights files, accept the agreement, and provide an access token that
allows InvokeAI to legally download and install the weights files.

If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](INSTALLING_MODELS.md).

7. The script will now exit and you'll be ready to generate some images. The
invokeAI directory will contain numerous files. Look for a shell script
named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
by double-clicking it or typing its name at the command-line:

```cmd
C:\Documents\Linco> cd invokeAI
C:\Documents\Linco\invokeAI> invoke.bat
```

The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
the command-line interface, or (2) the web GUI. If you start the latter, you can
load the user interface by pointing your browser at http://localhost:9090.

The `invoke` script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a command-line
interface in which you can run python commands directly, access developer tools,
and launch InvokeAI with customized options. To do the latter, you would launch
the script `scripts/invoke.py` as shown in this example:

```cmd
python scripts/invoke.py --web --max_load_models=3 \
--model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
```

These options are described in detail in the
[Command-Line Interface](../features/CLI.md) documentation.

## Updating to newer versions

This section describes how to update InvokeAI to new versions of the software.

### Updating the stable version

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `preload_models` script to download any updated models
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

### Updating to the development version

There may be times that there is a feature in the `development` branch of
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
corrects an annoying bug. To do this, you will use the developer's console.

From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
`invoke.bat` (Windows) and selection option (3) to open the developers console.
Then run the following command to get the `development branch`:

```bash
git checkout development
git pull
conda env update
```

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `preload_models.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/preload_models.py`.

## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.
@ -1,8 +1,8 @@
---
-title: InvokeAI Installer
+title: InvokeAI Binary Installer
---

-The InvokeAI installer is a shell script that will install InvokeAI onto a stock
+The InvokeAI binary installer is a shell script that will install InvokeAI onto a stock
computer running recent versions of Linux, MacOSX or Windows. It will leave you
with a version that runs a stable version of InvokeAI. When a new version of
InvokeAI is released, you will download and reinstall the new version.

@ -10,7 +10,7 @@ InvokeAI is released, you will download and reinstall the new version.
If you wish to tinker with unreleased versions of InvokeAI that introduce
potentially unstable new features, you should consider using the
[source installer](INSTALL_SOURCE.md) or one of the
-[manual install](INSTALL_MANUAL.md) methods.
+[manual install](../020_INSTALL_MANUAL.md) methods.

**Important Caveats**
- This script does not support AMD GPUs. For Linux AMD support,

@ -36,7 +36,7 @@ recommended model weights files.

1. Download the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
-InvokeAI's installer for your platform
+InvokeAI's installer for your platform. Look for a file named `InvokeAI-binary-<your platform>.zip`

2. Place the downloaded package someplace where you have plenty of HDD space,
and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)
@ -0,0 +1,32 @@
---
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

## Introduction

We have a [Jupyter
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode
(with the appropriate extensions installed)/Jupyter/JupyterLab and
start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

## Running Online On Google Colabotary

[](https://colab.research.google.com/github/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)

## Running Locally (Cloning)

1. Install the Jupyter Notebook python library (one-time):

   pip install jupyter

2. Clone the InvokeAI repository:

   git clone https://github.com/invoke-ai/InvokeAI.git
   cd invoke-ai

3. Create a virtual environment using conda:

   conda create -n invoke jupyter

4. Activate the environment and start the Jupyter notebook:

   conda activate invoke
   jupyter notebook
@ -69,7 +69,7 @@ title: Manual Installation, Linux
machine-learning models:

```bash
-(invokeai) ~/InvokeAI$ python3 scripts/preload_models.py
+(invokeai) ~/InvokeAI$ python3 scripts/configure_invokeai.py
```

!!! note

@ -79,7 +79,7 @@ title: Manual Installation, Linux
and obtaining an access token for downloading. It will then download and
install the weights files for you.

-Please look [here](INSTALLING_MODELS.md) for a manual process for doing
+Please look [here](../INSTALL_MANUAL.md) for a manual process for doing
the same thing.

7. Start generating images!

@ -112,7 +112,7 @@ title: Manual Installation, Linux
To use an alternative model you may invoke the `!switch` command in
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
either the CLI or the Web UI. See [Command Line
-Client](../features/CLI.md#model-selection-and-importation). The
+Client](../../features/CLI.md#model-selection-and-importation). The
model names are defined in `configs/models.yaml`.

8. Subsequently, to relaunch the script, be sure to run "conda activate
@ -111,7 +111,7 @@ will do our best to help.

!!! todo "Download the model weight files"

-The `preload_models.py` script downloads and installs the model weight
+The `configure_invokeai.py` script downloads and installs the model weight
files for you. It will lead you through the process of getting a Hugging Face
account, accepting the Stable Diffusion model weight license agreement, and
creating a download token:

@ -119,7 +119,7 @@ will do our best to help.

```bash
# This will take some time, depending on the speed of your internet connection
# and will consume about 10GB of space
-python scripts/preload_models.py
+python scripts/configure_invokeai.py
```

!!! todo "Run InvokeAI!"

@ -150,7 +150,7 @@ will do our best to help.
To use an alternative model you may invoke the `!switch` command in
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
either the CLI or the Web UI. See [Command Line
-Client](../features/CLI.md#model-selection-and-importation). The
+Client](../../features/CLI.md#model-selection-and-importation). The
model names are defined in `configs/models.yaml`.

---

@ -220,8 +220,8 @@ There are several causes of these errors:
with "(invokeai)" then you activated it. If it begins with "(base)" or
something else you haven't.

-2. You might've run `./scripts/preload_models.py` or `./scripts/invoke.py`
+2. You might've run `./scripts/configure_invokeai.py` or `./scripts/invoke.py`
-instead of `python ./scripts/preload_models.py` or
+instead of `python ./scripts/configure_invokeai.py` or
`python ./scripts/invoke.py`. The cause of this error is long so it's below.

<!-- I could not find out where the error is, otherwise would have marked it as a footnote -->

@ -359,7 +359,7 @@ python ./scripts/txt2img.py \
### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'

```bash
-python scripts/preload_models.py
+python scripts/configure_invokeai.py
```

---
docs/installation/deprecated_documentation/INSTALL_SOURCE.md (new file, 225 lines)
@ -0,0 +1,225 @@
---
title: Source Installer
---

# The InvokeAI Source Installer

## Introduction

The source installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.

Before you begin, make sure that you meet the
[hardware requirements](../../index.md#hardware-requirements) and has the appropriate
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

## Walk through

Though there are multiple steps, there really is only one click involved to kick
off the process.

1. The source installer is distributed in ZIP files. Go to the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
look for a series of files named:

- [invokeAI-src-installer-2.2.3-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-mac.zip)
- [invokeAI-src-installer-2.2.3-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-windows.zip)
- [invokeAI-src-installer-2.2.3-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/invokeAI-src-installer-2.2.3-linux.zip)

Download the one that is appropriate for your operating system.

2. Unpack the zip file into a directory that has at least 18G of free space. Do
_not_ unpack into a directory that has an earlier version of InvokeAI.

This will create a new directory named "InvokeAI". This example shows how
this would look using the `unzip` command-line tool, but you may use any
graphical or command-line Zip extractor:

```cmd
C:\Documents\Linco> unzip invokeAI-windows.zip
Archive: C: \Linco\Downloads\invokeAI-linux.zip
creating: invokeAI\
inflating: invokeAI\install.bat
inflating: invokeAI\readme.txt
```

3. If you are a macOS user, you may need to install the Xcode command line tools.
These are a set of tools that are needed to run certain applications in a Terminal,
including InvokeAI. This package is provided directly by Apple.

To install, open a terminal window and run `xcode-select --install`. You will get
a macOS system popup guiding you through the install. If you already have them
installed, you will instead see some output in the Terminal advising you that the
tools are already installed.

More information can be found here:
https://www.freecodecamp.org/news/install-xcode-command-line-tools/

4. If you are using a desktop GUI, double-click the installer file. It will be
named `install.bat` on Windows systems and `install.sh` on Linux and
Macintosh systems.

5. Alternatively, from the command line, run the shell script or .bat file:

```cmd
C:\Documents\Linco> cd invokeAI
C:\Documents\Linco\invokeAI> install.bat
```

6. Sit back and let the install script work. It will install various binary
requirements including Conda, Git and Python, then download the current
InvokeAI code and install it along with its dependencies.

Be aware that some of the library download and install steps take a long time.
In particular, the `pytorch` package is quite large and often appears to get
"stuck" at 99.9%. Similarly, the `pip installing requirements` step may
appear to hang. Have patience and the installation step will eventually
resume. However, there are occasions when the library install does
legitimately get stuck. If you have been waiting for more than ten minutes
and nothing is happening, you can interrupt the script with ^C. You may restart
it and it will pick up where it left off.

7. After installation completes, the installer will launch a script called
`configure_invokeai.py`, which will guide you through the first-time process of
selecting one or more Stable Diffusion model weights files, downloading and
configuring them.

Note that the main Stable Diffusion weights file is protected by a license
agreement that you must agree to in order to use. The script will list the
steps you need to take to create an account on the official site that hosts
the weights files, accept the agreement, and provide an access token that
allows InvokeAI to legally download and install the weights files.

If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](../050_INSTALLING_MODELS.md).

8. The script will now exit and you'll be ready to generate some images. The
invokeAI directory will contain numerous files. Look for a shell script
named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
by double-clicking it or typing its name at the command-line:

```cmd
C:\Documents\Linco> cd invokeAI
C:\Documents\Linco\invokeAI> invoke.bat
```

The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
the command-line interface, or (2) the web GUI. If you start the latter, you can
load the user interface by pointing your browser at http://localhost:9090.

The `invoke` script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a command-line
interface in which you can run python commands directly, access developer tools,
and launch InvokeAI with customized options. To do the latter, you would launch
the script `scripts/invoke.py` as shown in this example:

```cmd
python scripts/invoke.py --web --max_load_models=3 \
--model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
```

These options are described in detail in the
[Command-Line Interface](../../features/CLI.md) documentation.

## Troubleshooting

_Package dependency conflicts_ If you have previously installed
InvokeAI or another Stable Diffusion package, the installer may
occasionally pick up outdated libraries and either the installer or
`invoke` will fail with complaints out library conflicts. There are
two steps you can take to clear this problem. Both of these are done
from within the "developer's console", which you can get to by
launching `invoke.sh` (or `invoke.bat`) and selecting launch option
#3:

1. Remove the previous `invokeai` environment completely. From within
the developer's console, give the command `conda env remove -n
invokeai`. This will delete previous files installed by `invoke`.

Then exit from the developer's console and launch the script
`update.sh` (or `update.bat`). This will download the most recent
InvokeAI (including bug fixes) and reinstall the environment.
You should then be able to run `invoke.sh`/`invoke.bat`.

2. If this doesn't work, you can try cleaning your system's conda
cache. This is slightly more extreme, but won't interfere with
any other python-based programs installed on your computer.
From the developer's console, run the command `conda clean -a`
and answer "yes" to all prompts.

After this is done, run `update.sh` and try again as before.

_"Corrupted configuration file."__ Everything seems to install ok, but
`invoke` complains of a corrupted configuration file and goes calls
`configure_invokeai.py` to fix, but this doesn't fix the problem.

This issue is often caused by a misconfigured configuration directive
in the `.invokeai` initialization file that contains startup settings.
This can be corrected by fixing the offending line.

First find `.invokeai`. It is a small text file located in your home
directory, `~/.invokeai` on Mac and Linux systems, and `C:\Users\*your
name*\.invokeai` on Windows systems. Open it with a text editor
(e.g. Notepad on Windows, TextEdit on Macs, or `nano` on Linux)
and look for the lines starting with `--root` and `--outdir`.

An example is here:

```cmd
--root="/home/lstein/invokeai"
--outdir="/home/lstein/invokeai/outputs"
```

There should not be whitespace before or after the directory paths,
and the paths should not end with slashes:

```cmd
--root="/home/lstein/invokeai " # wrong! no whitespace here
--root="/home\lstein\invokeai\" # wrong! shouldn't end in a slash
```

Fix the problem with your text editor and save as a **plain text**
file. This should clear the issue.

_If none of these maneuvers fixes the problem_ then please report the
problem to the [InvokeAI
Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive assistance.

## Updating to newer versions

This section describes how to update InvokeAI to new versions of the software.

### Updating the stable version

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `configure_invokeai` script to download any updated models
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `configure_invokeai.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/configure_invokeai.py`.

## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.
@ -7,7 +7,7 @@ title: Manual Installation, Windows
## **Notebook install (semi-automated)**

We have a
-[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
+[Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
with cell-by-cell installation steps. It will download the code in this repo as
one of the steps, so instead of cloning this repo, simply download the notebook
from the link above and load it up in VSCode (with the appropriate extensions

@ -65,7 +65,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:

```bash
-python scripts/preload_models.py
+python scripts/configure_invokeai.py
```

!!! note

@ -75,7 +75,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
obtaining an access token for downloading. It will then download and install the
weights files for you.

-Please look [here](INSTALLING_MODELS.md) for a manual process for doing the
+Please look [here](../INSTALL_MANUAL.md) for a manual process for doing the
same thing.

8. Start generating images!

@ -108,7 +108,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehan
To use an alternative model you may invoke the `!switch` command in
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
either the CLI or the Web UI. See [Command Line
-Client](../features/CLI.md#model-selection-and-importation). The
+Client](../../features/CLI.md#model-selection-and-importation). The
model names are defined in `configs/models.yaml`.

9. Subsequently, to relaunch the script, first activate the Anaconda
@ -5,58 +5,29 @@ title: Overview
We offer several ways to install InvokeAI, each one suited to your
experience and preferences.

-1. [InvokeAI installer](INSTALL_INVOKE.md)
+1. [Automated Installer](010_INSTALL_AUTOMATED.md)

-This is a installer script that installs InvokeAI and all the
+This is a script that will install all of InvokeAI's essential
-third party libraries it depends on. When a new version of
+third party libraries and InvokeAI itself. It includes access to a
-InvokeAI is released, you will download and reinstall the new
+"developer console" which will help us debug problems with you and
-version.
+give you to access experimental features.

-This installer is designed for people who want the system to "just
+2. [Manual Installation](020_INSTALL_MANUAL.md)
-work", don't have an interest in tinkering with it, and do not
-care about upgrading to unreleased experimental features.

-**Important Caveats**
-- This script does not support AMD GPUs. For Linux AMD support,
-please use the manual or source code installer methods.
-- This script has difficulty on some Macintosh machines
-that have previously been used for Python development due to
-conflicting development tools versions. Mac developers may wish
-to try the source code installer or one of the manual methods instead.

-2. [Source code installer](INSTALL_SOURCE.md)

-This is a script that will install InvokeAI and all its essential
-third party libraries. In contrast to the previous installer, it
-includes access to a "developer console" which will allow you to
-access experimental features on the development branch.

-This method is recommended for individuals who are wish to stay
-on the cutting edge of InvokeAI development and are not afraid
-of occasional breakage.

-3. [Manual Installation](INSTALL_MANUAL.md)

In this method you will manually run the commands needed to install
InvokeAI and its dependencies. We offer two recipes: one suited to
those who prefer the `conda` tool, and one suited to those who prefer
-`pip` and Python virtual environments.
+`pip` and Python virtual environments. In our hands the pip install
+is faster and more reliable, but your mileage may vary.

This method is recommended for users who have previously used `conda`
or `pip` in the past, developers, and anyone who wishes to remain on
the cutting edge of future InvokeAI development and is willing to put
up with occasional glitches and breakage.

-4. [Docker Installation](INSTALL_DOCKER.md)
+3. [Docker Installation](040_INSTALL_DOCKER.md)

We also offer a method for creating Docker containers containing
InvokeAI and its dependencies. This method is recommended for
individuals with experience with Docker containers and understand
the pluses and minuses of a container-based install.

-5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)

-This method is suitable for running InvokeAI on a Google Colab
-account. It is recommended for individuals who have previously
-worked on the Colab and are comfortable with the Jupyter notebook
-environment.
@ -13,6 +13,20 @@ We thank them for all of their time and hard work.

- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)

+## **Current core team**
+
+* @lstein (Lincoln Stein) - Co-maintainer
+* @blessedcoolant - Co-maintainer
+* @hipsterusername (Kent Keirsey) - Product Manager
+* @psychedelicious - Web Team Leader
+* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
+* @damian0815 - Attention Systems and Gameplay Engineer
+* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
+* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
+* @tildebyte - general gadfly and resident (self-appointed) know-it-all
+* @keturn - Lead for Diffusers port
+* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
+
## **Contributions by**

- [Sean McLellan](https://github.com/Oceanswave)

@ -61,6 +75,7 @@ We thank them for all of their time and hard work.
- [Kent Keirsey](https://github.com/hipsterusername)
- [psychedelicious](https://github.com/psychedelicious)
- [damian0815](https://github.com/damian0815)
+- [Eugene Brodsky](https://github.com/ebr)

## **Original CompVis Authors**
@ -1,3 +1,5 @@
mkdocs
mkdocs-material>=8, <9
mkdocs-git-revision-date-localized-plugin
+mkdocs-redirects==1.2.0
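The newly pinned `mkdocs-redirects` plugin is what lets the renamed, numbered
doc files above keep their old URLs. A hypothetical `mkdocs.yml` fragment
showing how such redirects are typically declared with that plugin; the actual
mapping lives in the repo's own config and may differ:

```yaml
plugins:
  - redirects:
      redirect_maps:
        'installation/INSTALL_AUTOMATED.md': 'installation/010_INSTALL_AUTOMATED.md'
        'installation/INSTALL_MANUAL.md': 'installation/020_INSTALL_MANUAL.md'
```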
@ -2,6 +2,7 @@ name: invokeai
channels:
- pytorch
- conda-forge
+- defaults
dependencies:
- albumentations=0.4.3
- cudatoolkit

@ -29,9 +30,9 @@ dependencies:
- torchvision
- transformers=4.21.3
- pip:
-- dependency_injector==4.40.0
- getpass_asterisk
- omegaconf==2.1.1
+- picklescan
- pyreadline3
- realesrgan
- taming-transformers-rom1504

@ -39,7 +40,6 @@ dependencies:
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
-- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
+- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
-variables:
-PYTORCH_ENABLE_MPS_FALLBACK: 1
@ -4,13 +4,12 @@ channels:
- conda-forge
- defaults
dependencies:
-- python>=3.9
+- python=3.9.*
- pip=22.2.2
- numpy=1.23.3
- pip:
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
- albumentations==0.4.3
-- dependency_injector==4.40.0
- diffusers==0.6.0
- einops==0.3.0
- eventlet

@ -23,6 +22,7 @@ dependencies:
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
+- picklescan
- pillow==9.2.0
- pudb==2019.2
- pyreadline3

@ -32,6 +32,7 @@ dependencies:
- streamlit==1.12.0
- taming-transformers-rom1504
- test-tube>=0.7.5
+- tqdm
- torch
- torch-fidelity==0.3.0
- torchaudio

@ -39,7 +40,8 @@ dependencies:
- torchvision
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
-- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
+- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
-- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
+- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
@ -4,7 +4,7 @@ channels:
- conda-forge
- defaults
dependencies:
-- python>=3.9
+- python=3.9.*
- pip=22.2.2
- numpy=1.23.3
- torchvision=0.13.1

@ -13,7 +13,6 @@ dependencies:
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
-- dependency_injector==4.40.0
- diffusers==0.6.0
- einops==0.3.0
- eventlet

@ -26,6 +25,7 @@ dependencies:
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
+- picklescan
- pillow==9.2.0
- pudb==2019.2
- pyreadline3

@ -39,7 +39,8 @@ dependencies:
- torchmetrics==0.7.0
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
-- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
+- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
-- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
+- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
@ -52,13 +52,14 @@ dependencies:
- transformers=4.23
- pip:
- getpass_asterisk
+- picklescan
- taming-transformers-rom1504
- test-tube==0.7.5
- git+https://github.com/openai/CLIP.git@main#egg=clip
-- git+https://github.com/invoke-ai/k-diffusion.git@mps#egg=k_diffusion
+- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
-- git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
-- git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
+- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
variables:
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
|||||||
- conda-forge
|
- conda-forge
|
||||||
- defaults
|
- defaults
|
||||||
dependencies:
|
dependencies:
|
||||||
- python>=3.9
|
- python=3.10.*
|
||||||
- pip=22.2.2
|
- pip=22.2.2
|
||||||
- numpy=1.23.3
|
- numpy=1.23.3
|
||||||
- torchvision=0.13.1
|
- torchvision=0.13.1
|
||||||
@ -13,8 +13,6 @@ dependencies:
|
|||||||
- cudatoolkit=11.6
|
- cudatoolkit=11.6
|
||||||
- pip:
|
- pip:
|
||||||
- albumentations==0.4.3
|
- albumentations==0.4.3
|
||||||
- basicsr==1.4.1
|
|
||||||
- dependency_injector==4.40.0
|
|
||||||
- diffusers==0.6.0
|
- diffusers==0.6.0
|
||||||
- einops==0.3.0
|
- einops==0.3.0
|
||||||
- eventlet
|
- eventlet
|
||||||
@ -27,6 +25,7 @@ dependencies:
|
|||||||
- kornia==0.6.0
|
- kornia==0.6.0
|
||||||
- omegaconf==2.2.3
|
- omegaconf==2.2.3
|
||||||
- opencv-python==4.5.5.64
|
- opencv-python==4.5.5.64
|
||||||
|
- picklescan
|
||||||
- pillow==9.2.0
|
- pillow==9.2.0
|
||||||
- pudb==2019.2
|
- pudb==2019.2
|
||||||
- pyreadline3
|
- pyreadline3
|
||||||
@ -42,5 +41,6 @@ dependencies:
|
|||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||||
- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
|
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
|
||||||
|
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
||||||
- -e .
|
- -e .
|
||||||
|
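When dependency pins in these conda environment files change like this, an
existing install can usually be refreshed rather than rebuilt from scratch. A
minimal sketch, assuming the environment is named `invokeai`; substitute
whichever environment file your platform's instructions reference (the file
name below is only illustrative):

```commandline
conda activate invokeai
conda env update -f environment-cuda.yml --prune
```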
@ -1,36 +1,39 @@
# pip will resolve the version which matches torch
albumentations
-dependency_injector==4.40.0
+diffusers==0.10.*
-diffusers
einops
eventlet
+facexlib
flask==2.1.3
flask_cors==3.0.10
flask_socketio==5.3.0
-flaskwebgui==0.3.7
+flaskwebgui==1.0.3
getpass_asterisk
+gfpgan==1.3.8
huggingface-hub
imageio
imageio-ffmpeg
kornia
-numpy
+numpy==1.23.*
omegaconf
opencv-python
+picklescan
pillow
pip>=22
pudb
pyreadline3
pytorch-lightning==1.7.7
realesrgan
+requests==2.25.1
scikit-image>=0.19
send2trash
streamlit
taming-transformers-rom1504
-test-tube
+test-tube>=0.7.5
torch-fidelity
torchmetrics
-transformers==4.21.*
+transformers==4.25.*
-git+https://github.com/openai/CLIP.git@main#egg=clip
+https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
-git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
+https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch
-git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
+https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
-git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
+https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg