Compare commits: feat/ui/no...2.1.3-rc7

205 commits
| SHA1 |
| --- |
| 9a1fe8e7fb |
| ff56f5251b |
| ed943bd6c7 |
| 7ad2355b1d |
| 66c920fc19 |
| 3fc5cb09f8 |
| 1345ec77ab |
| b116715490 |
| fa3670270e |
| c304250ef6 |
| 802ce5dde5 |
| 311ee320ec |
| e9df17b374 |
| 061fb4ef00 |
| 52be0d2396 |
| 4095acd10e |
| 201eb22d76 |
| 17ab982200 |
| a04965b0e9 |
| 0b529f0c57 |
| 6f9f848345 |
| 918c1589ef |
| 116415b3fc |
| b4b6eabaac |
| 4ef1f4a854 |
| 510fc4ebaa |
| a20914434b |
| 0d134195fd |
| 649d8c8573 |
| a358d370a0 |
| 94a9033c4f |
| 18a947c503 |
| a23b031895 |
| 23af68c7d7 |
| e258beeb51 |
| 7460c069b8 |
| e481bfac61 |
| 5040747c67 |
| d1ab65a431 |
| af4ee7feb8 |
| 764fb29ade |
| 1014d3ba44 |
| 40a48aca88 |
| 92abc00f16 |
| a5719aabf8 |
| 44a18511fa |
| b850dbadaf |
| 9ef8b944d5 |
| efc5a98488 |
| 1417c87928 |
| 2dd6fc2b93 |
| 22213612a0 |
| 71ee44a827 |
| b17ca0a5e7 |
| 71bbfe4a1a |
| 5702271991 |
| 10781e7dc4 |
| 099d1157c5 |
| ab825bf7ee |
| 10cfeb5ada |
| e97515d045 |
| 0f04bc5789 |
| 3f74aabecd |
| b1a99a51b7 |
| 8004f8a6d9 |
| ff8ff2212a |
| 8e5363cd83 |
| 1450779146 |
| 8cd5d95b8a |
| abd6407394 |
| 734dacfbe9 |
| 636620b1d5 |
| 1fe41146f0 |
| 2ad6ef355a |
| 865502ee4f |
| c7984f3299 |
| 7f150ed833 |
| badf4e256c |
| e64c60bbb3 |
| 1780618543 |
| f91fd27624 |
| 09e41e8f76 |
| 6eeb2107b3 |
| 17053ad8b7 |
| fefb4dc1f8 |
| d05b1b3544 |
| 82d4904c07 |
| 1cdcf33cfa |
| 6616fa835a |
| 7b9a4564b1 |
| fcdefa0620 |
| ef8b3ce639 |
| 36870a8f53 |
| b70420951d |
| 1f0c5b4cf1 |
| 8648da8111 |
| 45b4593563 |
| 41b04316cf |
| e97c6db2a3 |
| 896820a349 |
| 06c8f468bf |
| 61920e2701 |
| f34ba7ca70 |
| c30ef0895d |
| aa3a774f73 |
| 2c30555b84 |
| 743f605773 |
| 519c661abb |
| 22c956c75f |
| 13696adc3a |
| 0196571a12 |
| 9666f466ab |
| 240e5486c8 |
| 8164b6b9cf |
| 4fc82d554f |
| 96b34c0f85 |
| dd5a88dcee |
| 95ed56bf82 |
| 1ae80f5ab9 |
| 1f0bd3ca6c |
| a1971f6830 |
| c6118e8898 |
| 7ba958cf7f |
| 383905d5d2 |
| 6173e3e9ca |
| 3feb7d8922 |
| 1d9edbd0dd |
| d439abdb89 |
| ee47ea0c89 |
| 300bb2e627 |
| ccf8593501 |
| 0fda612f3f |
| 5afff65b71 |
| 7e55bdefce |
| 620cf84d3d |
| cfe567c62a |
| cefe12f1df |
| 1e51c39928 |
| 42a02bbb80 |
| f1ae6dae4c |
| 6195579910 |
| 16c8b23b34 |
| 07ae626b22 |
| 8d171bb044 |
| 6e33ca7e9e |
| db46e12f2b |
| 868e4b2db8 |
| 2e562742c1 |
| 68e6958009 |
| ea6e3a7949 |
| b2879ca99f |
| 4e911566c3 |
| 9bafda6a15 |
| 871a8a5375 |
| 0eef74bc00 |
| 423ae32097 |
| 8282e5d045 |
| 19305cdbdf |
| eb9028ab30 |
| 21483f5d07 |
| 82dcbac28f |
| d43bd4625d |
| ea891324a2 |
| 8fd9ea2193 |
| fb02666856 |
| f6f5c2731b |
| b4e3f771e0 |
| 99bb9491ac |
| 0453f21127 |
| 9fc09aa4bd |
| 5e87062cf8 |
| 3e7a459990 |
| bbf4c03e50 |
| 611a3a9753 |
| 1611f0d181 |
| 08835115e4 |
| 2d84e28d32 |
| ef17aae8ab |
| 0cc39f01a3 |
| 688d7258f1 |
| 4513320bf1 |
| 533fd04ef0 |
| dff5681cf0 |
| 5a2790a69b |
| 7c5305ccba |
| 4013e8ad6f |
| d1dfd257f9 |
| 5322d735ee |
| cdb107dcda |
| be1393a41c |
| e554c2607f |
| 6215592b12 |
| 349cc25433 |
| 214d276379 |
| ef24d76adc |
| ab2b5a691d |
| c7de2b2801 |
| e8075658ac |
| 4202dabee1 |
| d67db2bcf1 |
| 7159ec885f |
| b5cf734ba9 |
| f7dc8eafee |
| 762ca60a30 |
| e7fb9f342c |
.github/CODEOWNERS (vendored, 1 changed line)

```diff
@@ -2,3 +2,4 @@ ldm/invoke/pngwriter.py @CapableWeb
 ldm/invoke/server_legacy.py @CapableWeb
 scripts/legacy_api.py @CapableWeb
 tests/legacy_tests.sh @CapableWeb
+installer/ @tildebyte
```
.github/workflows/build-container.yml (vendored, 32 changed lines)

```diff
@@ -6,14 +6,22 @@ on:
     branches:
       - 'main'
       - 'development'
-  pull_request:
-    branches:
-      - 'main'
-      - 'development'
 
 jobs:
   docker:
+    strategy:
+      fail-fast: false
+      matrix:
+        arch:
+          - x86_64
+          - aarch64
+        include:
+          - arch: x86_64
+            conda-env-file: environment-lin-cuda.yml
+          - arch: aarch64
+            conda-env-file: environment-lin-aarch64.yml
     runs-on: ubuntu-latest
+    name: ${{ matrix.arch }}
     steps:
       - name: prepare docker-tag
         env:
@@ -25,18 +33,16 @@ jobs:
         uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
-      - name: Cache Docker layers
-        uses: actions/cache@v2
-        with:
-          path: /tmp/.buildx-cache
-          key: buildx-${{ hashFiles('docker-build/Dockerfile') }}
      - name: Build container
         uses: docker/build-push-action@v3
         with:
           context: .
           file: docker-build/Dockerfile
-          platforms: linux/amd64
+          platforms: Linux/${{ matrix.arch }}
           push: false
-          tags: ${{ env.dockertag }}:latest
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
+          tags: ${{ env.dockertag }}:${{ matrix.arch }}
+          build-args: |
+            conda_env_file=${{ matrix.conda-env-file }}
+            conda_version=py39_4.12.0-Linux-${{ matrix.arch }}
+            invokeai_git=${{ github.repository }}
+            invokeai_branch=${{ github.ref_name }}
```
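For orientation, a rough local equivalent of the x86_64 matrix leg of this workflow (a sketch, not taken from the repository: the image tag is illustrative, and the platform string is normalized to Docker's lowercase convention rather than the literal `Linux/${{ matrix.arch }}` the workflow passes):

```bash
# Approximates what docker/build-push-action does for the x86_64 entry,
# using the build-args defined in the matrix above.
docker buildx build \
  --platform linux/amd64 \
  --file docker-build/Dockerfile \
  --build-arg conda_env_file=environment-lin-cuda.yml \
  --build-arg conda_version=py39_4.12.0-Linux-x86_64 \
  --build-arg invokeai_git=invoke-ai/InvokeAI \
  --build-arg invokeai_branch=main \
  --tag invokeai:x86_64 \
  .
```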
.github/workflows/create-caches.yml (vendored, 80 changed lines; file deleted)

```diff
@@ -1,80 +0,0 @@
-name: Create Caches
-
-on: workflow_dispatch
-
-jobs:
-  os_matrix:
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest]
-        include:
-          - os: ubuntu-latest
-            environment-file: environment.yml
-            default-shell: bash -l {0}
-          - os: macos-latest
-            environment-file: environment-mac.yml
-            default-shell: bash -l {0}
-    name: Test invoke.py on ${{ matrix.os }} with conda
-    runs-on: ${{ matrix.os }}
-    defaults:
-      run:
-        shell: ${{ matrix.default-shell }}
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-
-      - name: setup miniconda
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          auto-activate-base: false
-          auto-update-conda: false
-          miniconda-version: latest
-
-      - name: set environment
-        run: |
-          [[ "$GITHUB_REF" == 'refs/heads/main' ]] \
-            && echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV \
-            || echo "TEST_PROMPTS=tests/dev_prompts.txt" >> $GITHUB_ENV
-          echo "CONDA_ROOT=$CONDA" >> $GITHUB_ENV
-          echo "CONDA_ENV_NAME=invokeai" >> $GITHUB_ENV
-
-      - name: Use Cached Stable Diffusion v1.4 Model
-        id: cache-sd-v1-4
-        uses: actions/cache@v3
-        env:
-          cache-name: cache-sd-v1-4
-        with:
-          path: models/ldm/stable-diffusion-v1/model.ckpt
-          key: ${{ env.cache-name }}
-          restore-keys: ${{ env.cache-name }}
-
-      - name: Download Stable Diffusion v1.4 Model
-        if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
-        run: |
-          [[ -d models/ldm/stable-diffusion-v1 ]] \
-            || mkdir -p models/ldm/stable-diffusion-v1
-          [[ -r models/ldm/stable-diffusion-v1/model.ckpt ]] \
-            || curl \
-              -H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
-              -o models/ldm/stable-diffusion-v1/model.ckpt \
-              -L https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
-
-      - name: Activate Conda Env
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          activate-environment: ${{ env.CONDA_ENV_NAME }}
-          environment-file: ${{ matrix.environment-file }}
-
-      - name: Use Cached Huggingface and Torch models
-        id: cache-hugginface-torch
-        uses: actions/cache@v3
-        env:
-          cache-name: cache-hugginface-torch
-        with:
-          path: ~/.cache
-          key: ${{ env.cache-name }}
-          restore-keys: |
-            ${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }}
-
-      - name: run preload_models.py
-        run: python scripts/preload_models.py
```
.github/workflows/test-invoke-conda.yml (vendored, 17 changed lines)

```diff
@@ -23,7 +23,7 @@ jobs:
           - macOS-12
         include:
           - os: ubuntu-latest
-            environment-file: environment.yml
+            environment-file: environment-lin-cuda.yml
             default-shell: bash -l {0}
           - os: macOS-12
             environment-file: environment-mac.yml
@@ -49,6 +49,9 @@ jobs:
       - name: create models.yaml from example
         run: cp configs/models.yaml.example configs/models.yaml
 
+      - name: create environment.yml
+        run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml
+
       - name: Use cached conda packages
         id: use-cached-conda-packages
         uses: actions/cache@v3
@@ -61,7 +64,7 @@ jobs:
         uses: conda-incubator/setup-miniconda@v2
         with:
           activate-environment: ${{ env.CONDA_ENV_NAME }}
-          environment-file: ${{ matrix.environment-file }}
+          environment-file: environment.yml
           miniconda-version: latest
 
       - name: set test prompt to main branch validation
@@ -76,8 +79,18 @@ jobs:
         if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
         run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> $GITHUB_ENV
 
+      - name: Use Cached Stable Diffusion Model
+        id: cache-sd-model
+        uses: actions/cache@v3
+        env:
+          cache-name: cache-${{ matrix.stable-diffusion-model-switch }}
+        with:
+          path: ${{ matrix.stable-diffusion-model-dl-path }}
+          key: ${{ env.cache-name }}
+
       - name: Download ${{ matrix.stable-diffusion-model-switch }}
         id: download-stable-diffusion-model
+        if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
         run: |
           [[ -d models/ldm/stable-diffusion-v1 ]] \
             || mkdir -p models/ldm/stable-diffusion-v1
```
.gitignore (vendored, 28 changed lines)

```diff
@@ -194,6 +194,10 @@ checkpoints
 
 # Let the frontend manage its own gitignore
 !frontend/*
+frontend/apt-get
+frontend/dist
+frontend/sudo
+frontend/update
 
 # Scratch folder
 .scratch/
@@ -201,6 +205,7 @@ checkpoints
 gfpgan/
 models/ldm/stable-diffusion-v1/*.sha256
 
+
 # GFPGAN model files
 gfpgan/
 
@@ -208,4 +213,25 @@ gfpgan/
 configs/models.yaml
 
 # weights (will be created by installer)
 models/ldm/stable-diffusion-v1/*.ckpt
+models/clipseg
+models/gfpgan
+
+# ignore initfile
+invokeai.init
+
+# ignore environment.yml and requirements.txt
+# these are links to the real files in environments-and-requirements
+environment.yml
+requirements.txt
+
+# source installer files
+source_installer/*zip
+source_installer/invokeAI
+install.bat
+install.sh
+update.bat
+update.sh
+
+# this may be present if the user created a venv
+invokeai
```
(17 binary image files changed; before and after sizes identical, 6.1 KiB to 553 KiB)
docker-build/Dockerfile

```diff
@@ -39,36 +39,46 @@ RUN apt-get update \
   && apt-get clean \
   && rm -rf /var/lib/apt/lists/*
 
-# clone repository and create symlinks
-ARG invokeai_git=https://github.com/invoke-ai/InvokeAI.git
+# clone repository, create models.yaml and create symlinks
+ARG invokeai_git=invoke-ai/InvokeAI
+ARG invokeai_branch=main
 ARG project_name=invokeai
-RUN git clone ${invokeai_git} /${project_name} \
-  && mkdir /${project_name}/models/ldm/stable-diffusion-v1 \
-  && ln -s /data/models/sd-v1-4.ckpt /${project_name}/models/ldm/stable-diffusion-v1/model.ckpt \
-  && ln -s /data/outputs/ /${project_name}/outputs
+ARG conda_env_file=environment-lin-cuda.yml
+RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
+  && cp \
+    "/${project_name}/configs/models.yaml.example" \
+    "/${project_name}/configs/models.yaml" \
+  && ln -sf \
+    "/${project_name}/environments-and-requirements/${conda_env_file}" \
+    "/${project_name}/environment.yml" \
+  && ln -sf \
+    /data/models/v1-5-pruned-emaonly.ckpt \
+    "/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
+  && ln -sf \
+    /data/outputs/ \
+    "/${project_name}/outputs"
 
 # set workdir
-WORKDIR /${project_name}
+WORKDIR "/${project_name}"
 
 # install conda env and preload models
 ARG conda_prefix=/opt/conda
-ARG conda_env_file=environment.yml
-COPY --from=get_miniconda ${conda_prefix} ${conda_prefix}
-RUN source ${conda_prefix}/etc/profile.d/conda.sh \
+COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
+RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
   && conda init bash \
   && source ~/.bashrc \
   && conda env create \
-    --name ${project_name} \
-    --file ${conda_env_file} \
+    --name "${project_name}" \
   && rm -Rf ~/.cache \
   && conda clean -afy \
-  && echo "conda activate ${project_name}" >> ~/.bashrc \
-  && ln -s /data/models/GFPGANv1.4.pth ./src/gfpgan/experiments/pretrained_models/GFPGANv1.4.pth \
-  && conda activate ${project_name} \
-  && python scripts/preload_models.py
+  && echo "conda activate ${project_name}" >> ~/.bashrc
+
+RUN source ~/.bashrc \
+  && python scripts/preload_models.py \
+    --no-interactive
 
 # Copy entrypoint and set env
-ENV CONDA_PREFIX=${conda_prefix}
-ENV PROJECT_NAME=${project_name}
+ENV CONDA_PREFIX="${conda_prefix}"
+ENV PROJECT_NAME="${project_name}"
 COPY docker-build/entrypoint.sh /
 ENTRYPOINT [ "/entrypoint.sh" ]
```
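A minimal sketch of running the resulting image (the volume and tag names are illustrative assumptions; the symlinks created above expect the checkpoint and an outputs directory under /data):

```bash
# Mount a volume at /data so v1-5-pruned-emaonly.ckpt and outputs/
# resolve inside the container via the symlinks from the Dockerfile.
docker run -it --rm \
  -v invokeai_data:/data \
  invokeai:latest
```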
docker-build/build.sh

```diff
@@ -8,8 +8,9 @@ source ./docker-build/env.sh || echo "please run from repository root" || exit 1
 
 invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
 invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
-invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml}
-invokeai_git=${INVOKEAI_GIT:-https://github.com/invoke-ai/InvokeAI.git}
+invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
+invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
+invokeai_branch=${INVOKEAI_BRANCH:-main}
 huggingface_token=${HUGGINGFACE_TOKEN?}
 
 # print the settings
@@ -38,11 +39,12 @@ _copyCheckpoints() {
   echo "creating subfolders for models and outputs"
   _runAlpine mkdir models
   _runAlpine mkdir outputs
-  echo -n "downloading sd-v1-4.ckpt"
-  _runAlpine wget --header="Authorization: Bearer ${huggingface_token}" -O models/sd-v1-4.ckpt https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
+  echo "downloading v1-5-pruned-emaonly.ckpt"
+  _runAlpine wget \
+    --header="Authorization: Bearer ${huggingface_token}" \
+    -O models/v1-5-pruned-emaonly.ckpt \
+    https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
   echo "done"
-  echo "downloading GFPGANv1.4.pth"
-  _runAlpine wget -O models/GFPGANv1.4.pth https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth
 }
 
 _checkVolumeContent() {
@@ -51,7 +53,7 @@ _checkVolumeContent() {
 
 _getModelMd5s() {
   _runAlpine \
-    alpine sh -c "md5sum /data/models/*"
+    alpine sh -c "md5sum /data/models/*.ckpt"
 }
 
 if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
@@ -77,5 +79,6 @@ docker build \
   --build-arg conda_prefix="${invokeai_conda_prefix}" \
   --build-arg conda_env_file="${invokeai_conda_env_file}" \
   --build-arg invokeai_git="${invokeai_git}" \
+  --build-arg invokeai_branch="${invokeai_branch}" \
   --file ./docker-build/Dockerfile \
   .
```
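The variables above are all overridable from the environment. A minimal sketch of invoking the script with custom settings (assuming it lives at docker-build/build.sh, as the env.sh sourcing line suggests; the token value is a placeholder):

```bash
# HUGGINGFACE_TOKEN is required: the ${HUGGINGFACE_TOKEN?} expansion aborts
# the script without it. Everything else falls back to the defaults shown
# in the diff above.
HUGGINGFACE_TOKEN="hf_..." \
INVOKEAI_GIT="invoke-ai/InvokeAI" \
INVOKEAI_BRANCH="main" \
INVOKEAI_CONDA_ENV_FILE="environment-lin-cuda.yml" \
  ./docker-build/build.sh
```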
docs/CHANGELOG.md

```diff
@@ -4,45 +4,228 @@ title: Changelog
 
 # :octicons-log-16: **Changelog**
 
-## v2.0.1 (13 October 2022)
+## v2.1.0 <small>(2 November 2022)</small>
 
-- fix noisy images at high step count when using k* samplers
-- dream.py script now calls invoke.py module directly rather than
-  via a new python process (which could break the environment)
+- update mac instructions to use invokeai for env name by @willwillems in
+  https://github.com/invoke-ai/InvokeAI/pull/1030
+- Update .gitignore by @blessedcoolant in
+  https://github.com/invoke-ai/InvokeAI/pull/1040
+- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
+  missing after merge by @skurovec in
+  https://github.com/invoke-ai/InvokeAI/pull/1056
+- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
+  https://github.com/invoke-ai/InvokeAI/pull/1060
+- Print out the device type which is used by @manzke in
+  https://github.com/invoke-ai/InvokeAI/pull/1073
+- Hires Addition by @hipsterusername in
+  https://github.com/invoke-ai/InvokeAI/pull/1063
+- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
+  @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
+- Forward dream.py to invoke.py using the same interpreter, add deprecation
+  warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
+- fix noisy images at high step counts by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1086
+- Generalize facetool strength argument by @db3000 in
+  https://github.com/invoke-ai/InvokeAI/pull/1078
+- Enable fast switching among models at the invoke> command line by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1066
+- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
+  https://github.com/invoke-ai/InvokeAI/pull/1095
+- Update generate.py by @unreleased in
+  https://github.com/invoke-ai/InvokeAI/pull/1109
+- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in
+  https://github.com/invoke-ai/InvokeAI/pull/1125
+- Fixed documentation typos and resolved merge conflicts by @rupeshs in
+  https://github.com/invoke-ai/InvokeAI/pull/1123
+- Fix broken doc links, fix malaprop in the project subtitle by @majick in
+  https://github.com/invoke-ai/InvokeAI/pull/1131
+- Only output facetool parameters if enhancing faces by @db3000 in
+  https://github.com/invoke-ai/InvokeAI/pull/1119
+- Update gitignore to ignore codeformer weights at new location by
+  @spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1136
+- fix links to point to invoke-ai.github.io #1117 by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1143
+- Rework-mkdocs by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1144
+- add option to CLI and pngwriter that allows user to set PNG compression level
+  by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
+- Fix img2img DDIM index out of bound by @wfng92 in
+  https://github.com/invoke-ai/InvokeAI/pull/1137
+- Fix gh actions by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1128
+- update mac instructions to use invokeai for env name by @willwillems in
+  https://github.com/invoke-ai/InvokeAI/pull/1030
+- Update .gitignore by @blessedcoolant in
+  https://github.com/invoke-ai/InvokeAI/pull/1040
+- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
+  missing after merge by @skurovec in
+  https://github.com/invoke-ai/InvokeAI/pull/1056
+- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
+  https://github.com/invoke-ai/InvokeAI/pull/1060
+- Print out the device type which is used by @manzke in
+  https://github.com/invoke-ai/InvokeAI/pull/1073
+- Hires Addition by @hipsterusername in
+  https://github.com/invoke-ai/InvokeAI/pull/1063
+- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
+  @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
+- Forward dream.py to invoke.py using the same interpreter, add deprecation
+  warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
+- fix noisy images at high step counts by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1086
+- Generalize facetool strength argument by @db3000 in
+  https://github.com/invoke-ai/InvokeAI/pull/1078
+- Enable fast switching among models at the invoke> command line by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1066
+- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
+  https://github.com/invoke-ai/InvokeAI/pull/1095
+- Fixed documentation typos and resolved merge conflicts by @rupeshs in
+  https://github.com/invoke-ai/InvokeAI/pull/1123
+- Only output facetool parameters if enhancing faces by @db3000 in
+  https://github.com/invoke-ai/InvokeAI/pull/1119
+- add option to CLI and pngwriter that allows user to set PNG compression level
+  by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
+- Fix img2img DDIM index out of bound by @wfng92 in
+  https://github.com/invoke-ai/InvokeAI/pull/1137
+- Add text prompt to inpaint mask support by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1133
+- Respect http[s] protocol when making socket.io middleware by @damian0815 in
+  https://github.com/invoke-ai/InvokeAI/pull/976
+- WebUI: Adds Codeformer support by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1151
+- Skips normalizing prompts for web UI metadata by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1165
+- Add Asymmetric Tiling by @carson-katri in
+  https://github.com/invoke-ai/InvokeAI/pull/1132
+- Web UI: Increases max CFG Scale to 200 by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1172
+- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious
+  in https://github.com/invoke-ai/InvokeAI/pull/1175
+- Flips channels using array slicing instead of using OpenCV by @psychedelicious
+  in https://github.com/invoke-ai/InvokeAI/pull/1178
+- Fix typo in docs: s/Formally/Formerly by @noodlebox in
+  https://github.com/invoke-ai/InvokeAI/pull/1176
+- fix clipseg loading problems by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1177
+- Correct color channels in upscale using array slicing by @wfng92 in
+  https://github.com/invoke-ai/InvokeAI/pull/1181
+- Web UI: Filters existing images when adding new images; Fixes #1085 by
+  @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1171
+- fix a number of bugs in textual inversion by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1190
+- Improve !fetch, add !replay command by @ArDiouscuros in
+  https://github.com/invoke-ai/InvokeAI/pull/882
+- Fix generation of image with s>1000 by @holstvoogd in
+  https://github.com/invoke-ai/InvokeAI/pull/951
+- Web UI: Gallery improvements by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1198
+- Update CLI.md by @krummrey in https://github.com/invoke-ai/InvokeAI/pull/1211
+- outcropping improvements by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1207
+- add support for loading VAE autoencoders by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1216
+- remove duplicate fix_func for MPS by @wfng92 in
+  https://github.com/invoke-ai/InvokeAI/pull/1210
+- Metadata storage and retrieval fixes by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1204
+- nix: add shell.nix file by @Cloudef in
+  https://github.com/invoke-ai/InvokeAI/pull/1170
+- Web UI: Changes vite dist asset paths to relative by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1185
+- Web UI: Removes isDisabled from PromptInput by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1187
+- Allow user to generate images with initial noise as on M1 / mps system by
+  @ArDiouscuros in https://github.com/invoke-ai/InvokeAI/pull/981
+- feat: adding filename format template by @plucked in
+  https://github.com/invoke-ai/InvokeAI/pull/968
+- Web UI: Fixes broken bundle by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1242
+- Support runwayML custom inpainting model by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1243
+- Update IMG2IMG.md by @talitore in
+  https://github.com/invoke-ai/InvokeAI/pull/1262
+- New dockerfile - including a build- and a run- script as well as a GH-Action
+  by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1233
+- cut over from karras to model noise schedule for higher steps by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1222
+- Prompt tweaks by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1268
+- Outpainting implementation by @Kyle0654 in
+  https://github.com/invoke-ai/InvokeAI/pull/1251
+- fixing aspect ratio on hires by @tjennings in
+  https://github.com/invoke-ai/InvokeAI/pull/1249
+- Fix-build-container-action by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1274
+- handle all unicode characters by @damian0815 in
+  https://github.com/invoke-ai/InvokeAI/pull/1276
+- adds models.user.yml to .gitignore by @JakeHL in
+  https://github.com/invoke-ai/InvokeAI/pull/1281
+- remove debug branch, set fail-fast to false by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1284
+- Protect-secrets-on-pr by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1285
+- Web UI: Adds initial inpainting implementation by @psychedelicious in
+  https://github.com/invoke-ai/InvokeAI/pull/1225
+- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1289
+- Use proper authentication to download model by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1287
+- Prevent indexing error for mode RGB by @spezialspezial in
+  https://github.com/invoke-ai/InvokeAI/pull/1294
+- Integrate sd-v1-5 model into test matrix (easily expandable), remove
+  unecesarry caches by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1293
+- add --no-interactive to preload_models step by @mauwii in
+  https://github.com/invoke-ai/InvokeAI/pull/1302
+- 1-click installer and updater. Uses micromamba to install git and conda into a
+  contained environment (if necessary) before running the normal installation
+  script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
+- preload_models.py script downloads the weight files by @lstein in
+  https://github.com/invoke-ai/InvokeAI/pull/1290
+
+## v2.0.1 <small>(13 October 2022)</small>
+
+- fix noisy images at high step count when using k\* samplers
+- dream.py script now calls invoke.py module directly rather than via a new
+  python process (which could break the environment)
 
 ## v2.0.0 <small>(9 October 2022)</small>
 
-- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
-  for backward compatibility.
+- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
+  backward compatibility.
 - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
-- Support for [inpainting](features/INPAINTING.md) and [outpainting](features/OUTPAINTING.md)
-- img2img runs on all k* samplers
-- Support for [negative prompts](features/PROMPTS.md#negative-and-unconditioned-prompts)
+- Support for [inpainting](features/INPAINTING.md) and
+  [outpainting](features/OUTPAINTING.md)
+- img2img runs on all k\* samplers
+- Support for
+  [negative prompts](features/PROMPTS.md#negative-and-unconditioned-prompts)
 - Support for CodeFormer face reconstruction
 - Support for Textual Inversion on Macintoshes
-- Support in both WebGUI and CLI for [post-processing of previously-generated images](features/POSTPROCESS.md)
-  using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
-  and "embiggen" upscaling. See the `!fix` command.
-- New `--hires` option on `invoke>` line allows [larger images to be created without duplicating elements](features/CLI.md#this-is-an-example-of-txt2img), at the cost of some performance.
-- New `--perlin` and `--threshold` options allow you to add and control variation
-  during image generation (see [Thresholding and Perlin Noise Initialization](features/OTHER.md#thresholding-and-perlin-noise-initialization-options))
-- Extensive metadata now written into PNG files, allowing reliable regeneration of images
-  and tweaking of previous settings.
-- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
-- Improved [command-line completion behavior](features/CLI.md)
-  New commands added:
+- Support in both WebGUI and CLI for
+  [post-processing of previously-generated images](features/POSTPROCESS.md)
+  using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E
+  infinite canvas), and "embiggen" upscaling. See the `!fix` command.
+- New `--hires` option on `invoke>` line allows
+  [larger images to be created without duplicating elements](features/CLI.md#this-is-an-example-of-txt2img),
+  at the cost of some performance.
+- New `--perlin` and `--threshold` options allow you to add and control
+  variation during image generation (see
+  [Thresholding and Perlin Noise Initialization](features/OTHER.md#thresholding-and-perlin-noise-initialization-options))
+- Extensive metadata now written into PNG files, allowing reliable regeneration
+  of images and tweaking of previous settings.
+- Command-line completion in `invoke.py` now works on Windows, Linux and Mac
+  platforms.
+- Improved [command-line completion behavior](features/CLI.md) New commands
+  added:
   - List command-line history with `!history`
   - Search command-line history with `!search`
   - Clear history with `!clear`
 - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
-  configure. To switch away from auto use the new flag like `--precision=float32`.
+  configure. To switch away from auto use the new flag like
+  `--precision=float32`.
 
 ## v1.14 <small>(11 September 2022)</small>
 
 - Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
 - Full support for Apple hardware with M1 or M2 chips.
 - Add "seamless mode" for circular tiling of image. Generates beautiful effects.
   ([prixt](https://github.com/prixt)).
 - Inpainting support.
 - Improved web server GUI.
 - Lots of code and documentation cleanups.
@@ -50,16 +233,17 @@ title: Changelog
 ## v1.13 <small>(3 September 2022)</small>
 
 - Support image variations (see [VARIATIONS](features/VARIATIONS.md)
-  ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
-- Supports a Google Colab notebook for a standalone server running on Google hardware
-  [Arturo Mendivil](https://github.com/artmen1516)
+  ([Kevin Gibbons](https://github.com/bakkot) and many contributors and
+  reviewers)
+- Supports a Google Colab notebook for a standalone server running on Google
+  hardware [Arturo Mendivil](https://github.com/artmen1516)
 - WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
   [Kevin Gibbons](https://github.com/bakkot)
 - WebUI supports incremental display of in-progress images during generation
   [Kevin Gibbons](https://github.com/bakkot)
 - A new configuration file scheme that allows new models (including upcoming
   stable-diffusion-v1.5) to be added without altering the code.
   ([David Wager](https://github.com/maddavid12))
 - Can specify --grid on invoke.py command line as the default.
 - Miscellaneous internal bug and stability fixes.
 - Works on M1 Apple hardware.
@@ -71,49 +255,59 @@ title: Changelog
 
 - Improved file handling, including ability to read prompts from standard input.
   (kudos to [Yunsaki](https://github.com/yunsaki)
-- The web server is now integrated with the invoke.py script. Invoke by adding --web to
-  the invoke.py command arguments.
+- The web server is now integrated with the invoke.py script. Invoke by adding
+  --web to the invoke.py command arguments.
 - Face restoration and upscaling via GFPGAN and Real-ESGAN are now automatically
   enabled if the GFPGAN directory is located as a sibling to Stable Diffusion.
-  VRAM requirements are modestly reduced. Thanks to both [Blessedcoolant](https://github.com/blessedcoolant) and
+  VRAM requirements are modestly reduced. Thanks to both
+  [Blessedcoolant](https://github.com/blessedcoolant) and
   [Oceanswave](https://github.com/oceanswave) for their work on this.
-- You can now swap samplers on the invoke> command line. [Blessedcoolant](https://github.com/blessedcoolant)
+- You can now swap samplers on the invoke> command line.
+  [Blessedcoolant](https://github.com/blessedcoolant)
 
 ---
 
 ## v1.11 <small>(26 August 2022)</small>
 
-- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module. (kudos to [Oceanswave](https://github.com/Oceanswave)
-- You now can specify a seed of -1 to use the previous image's seed, -2 to use the seed for the image generated before that, etc.
-  Seed memory only extends back to the previous command, but will work on all images generated with the -n# switch.
+- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module.
+  (kudos to [Oceanswave](https://github.com/Oceanswave)
+- You now can specify a seed of -1 to use the previous image's seed, -2 to use
+  the seed for the image generated before that, etc. Seed memory only extends
+  back to the previous command, but will work on all images generated with the
+  -n# switch.
 - Variant generation support temporarily disabled pending more general solution.
-- Created a feature branch named **yunsaki-morphing-invoke** which adds experimental support for
-  iteratively modifying the prompt and its parameters. Please see[Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86)
-  for a synopsis of how this works. Note that when this feature is eventually added to the main branch, it will may be modified
-  significantly.
+- Created a feature branch named **yunsaki-morphing-invoke** which adds
+  experimental support for iteratively modifying the prompt and its parameters.
+  Please
+  see[Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86) for
+  a synopsis of how this works. Note that when this feature is eventually added
+  to the main branch, it will may be modified significantly.
 
 ---
 
 ## v1.10 <small>(25 August 2022)</small>
 
-- A barebones but fully functional interactive web server for online generation of txt2img and img2img.
+- A barebones but fully functional interactive web server for online generation
+  of txt2img and img2img.
 
 ---
 
 ## v1.09 <small>(24 August 2022)</small>
 
 - A new -v option allows you to generate multiple variants of an initial image
-  in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [
-  See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
-- Added ability to personalize text to image generation (kudos to [Oceanswave](https://github.com/Oceanswave) and [nicolai256](https://github.com/nicolai256))
+  in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave).
+  [ See this discussion in the PR for examples and details on use](https://github.com/lstein/stable-diffusion/pull/71#issuecomment-1226700810))
+- Added ability to personalize text to image generation (kudos to
+  [Oceanswave](https://github.com/Oceanswave) and
+  [nicolai256](https://github.com/nicolai256))
 - Enabled all of the samplers from k_diffusion
 
 ---
 
 ## v1.08 <small>(24 August 2022)</small>
 
-- Escape single quotes on the invoke> command before trying to parse. This avoids
-  parse errors.
+- Escape single quotes on the invoke> command before trying to parse. This
+  avoids parse errors.
 - Removed instruction to get Python3.8 as first step in Windows install.
   Anaconda3 does it for you.
 - Added bounds checks for numeric arguments that could cause crashes.
@@ -123,34 +317,36 @@ title: Changelog
 
 ## v1.07 <small>(23 August 2022)</small>
 
-- Image filenames will now never fill gaps in the sequence, but will be assigned the
-  next higher name in the chosen directory. This ensures that the alphabetic and chronological
-  sort orders are the same.
+- Image filenames will now never fill gaps in the sequence, but will be assigned
+  the next higher name in the chosen directory. This ensures that the alphabetic
+  and chronological sort orders are the same.
 
 ---
 
 ## v1.06 <small>(23 August 2022)</small>
 
-- Added weighted prompt support contributed by [xraxra](https://github.com/xraxra)
-- Example of using weighted prompts to tweak a demonic figure contributed by [bmaltais](https://github.com/bmaltais)
+- Added weighted prompt support contributed by
+  [xraxra](https://github.com/xraxra)
+- Example of using weighted prompts to tweak a demonic figure contributed by
+  [bmaltais](https://github.com/bmaltais)
 
 ---
 
 ## v1.05 <small>(22 August 2022 - after the drop)</small>
 
-- Filenames now use the following formats:
-  000010.95183149.png -- Two files produced by the same command (e.g. -n2),
-  000010.26742632.png -- distinguished by a different seed.
+- Filenames now use the following formats: 000010.95183149.png -- Two files
+  produced by the same command (e.g. -n2), 000010.26742632.png -- distinguished
+  by a different seed.
 
   000011.455191342.01.png -- Two files produced by the same command using
   000011.455191342.02.png -- a batch size>1 (e.g. -b2). They have the same seed.
 
-  000011.4160627868.grid#1-4.png -- a grid of four images (-g); the whole grid can
-  be regenerated with the indicated key
+  000011.4160627868.grid#1-4.png -- a grid of four images (-g); the whole grid
+  can be regenerated with the indicated key
 
 - It should no longer be possible for one image to overwrite another
-- You can use the "cd" and "pwd" commands at the invoke> prompt to set and retrieve
-  the path of the output directory.
+- You can use the "cd" and "pwd" commands at the invoke> prompt to set and
+  retrieve the path of the output directory.
 
 ---
 
@@ -164,26 +360,28 @@ title: Changelog
 
 ## v1.03 <small>(22 August 2022)</small>
 
-- The original txt2img and img2img scripts from the CompViz repository have been moved into
-  a subfolder named "orig_scripts", to reduce confusion.
+- The original txt2img and img2img scripts from the CompViz repository have been
+  moved into a subfolder named "orig_scripts", to reduce confusion.
 
 ---
 
 ## v1.02 <small>(21 August 2022)</small>
 
-- A copy of the prompt and all of its switches and options is now stored in the corresponding
-  image in a tEXt metadata field named "Dream". You can read the prompt using scripts/images2prompt.py,
-  or an image editor that allows you to explore the full metadata.
-  **Please run "conda env update" to load the k_lms dependencies!!**
+- A copy of the prompt and all of its switches and options is now stored in the
+  corresponding image in a tEXt metadata field named "Dream". You can read the
+  prompt using scripts/images2prompt.py, or an image editor that allows you to
+  explore the full metadata. **Please run "conda env update" to load the k_lms
+  dependencies!!**
 
 ---
 
 ## v1.01 <small>(21 August 2022)</small>
 
-- added k_lms sampling.
-  **Please run "conda env update" to load the k_lms dependencies!!**
-- use half precision arithmetic by default, resulting in faster execution and lower memory requirements
-  Pass argument --full_precision to invoke.py to get slower but more accurate image generation
+- added k_lms sampling. **Please run "conda env update" to load the k_lms
+  dependencies!!**
+- use half precision arithmetic by default, resulting in faster execution and
+  lower memory requirements Pass argument --full_precision to invoke.py to get
+  slower but more accurate image generation
 
 ---
 
```
|
Before Width: | Height: | Size: 284 KiB |
Before Width: | Height: | Size: 252 KiB |
Before Width: | Height: | Size: 428 KiB |
Before Width: | Height: | Size: 331 KiB |
Before Width: | Height: | Size: 369 KiB |
Before Width: | Height: | Size: 362 KiB |
Before Width: | Height: | Size: 329 KiB |
Before Width: | Height: | Size: 329 KiB |
Before Width: | Height: | Size: 377 KiB |
Before Width: | Height: | Size: 328 KiB |
Before Width: | Height: | Size: 380 KiB |
Before Width: | Height: | Size: 372 KiB |
Before Width: | Height: | Size: 401 KiB |
Before Width: | Height: | Size: 441 KiB |
Before Width: | Height: | Size: 451 KiB |
Before Width: | Height: | Size: 1.3 MiB |
Before Width: | Height: | Size: 338 KiB |
Before Width: | Height: | Size: 271 KiB |
Before Width: | Height: | Size: 353 KiB |
Before Width: | Height: | Size: 330 KiB |
Before Width: | Height: | Size: 439 KiB |
Before Width: | Height: | Size: 463 KiB |
Before Width: | Height: | Size: 444 KiB |
Before Width: | Height: | Size: 468 KiB |
Before Width: | Height: | Size: 466 KiB |
Before Width: | Height: | Size: 475 KiB |
Before Width: | Height: | Size: 429 KiB |
Before Width: | Height: | Size: 429 KiB |
Before Width: | Height: | Size: 1.3 MiB |
Before Width: | Height: | Size: 477 KiB |
Before Width: | Height: | Size: 476 KiB |
Before Width: | Height: | Size: 434 KiB |
(deleted file, name not captured)

```diff
@@ -1,116 +0,0 @@
-## 000001.1863159593.png
-
-
-banana sushi -s 50 -S 1863159593 -W 512 -H 512 -C 7.5 -A k_lms
-## 000002.1151955949.png
-
-
-banana sushi -s 50 -S 1151955949 -W 512 -H 512 -C 7.5 -A plms
-## 000003.2736230502.png
-
-
-banana sushi -s 50 -S 2736230502 -W 512 -H 512 -C 7.5 -A ddim
-## 000004.42.png
-
-
-banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms
-## 000005.42.png
-
-
-banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms
-## 000006.478163327.png
-
-
-banana sushi -s 50 -S 478163327 -W 640 -H 448 -C 7.5 -A k_lms
-## 000007.2407640369.png
-
-
-banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 2407640369:0.1
-## 000008.2772421987.png
-
-
-banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 2772421987:0.1
-## 000009.3532317557.png
-
-
-banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 3532317557:0.1
-## 000010.2028635318.png
-
-
-banana sushi -s 50 -S 2028635318 -W 512 -H 512 -C 7.5 -A k_lms
-## 000011.1111168647.png
-
-
-pond with waterlillies -s 50 -S 1111168647 -W 512 -H 512 -C 7.5 -A k_lms
-## 000012.1476370516.png
-
-
-pond with waterlillies -s 50 -S 1476370516 -W 512 -H 512 -C 7.5 -A k_lms
-## 000013.4281108706.png
-
-
-banana sushi -s 50 -S 4281108706 -W 960 -H 960 -C 7.5 -A k_lms
-## 000014.2396987386.png
-
-
-old sea captain with crow on shoulder -s 50 -S 2396987386 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A k_lms -f 0.75
-## 000015.1252923272.png
-
-
-old sea captain with crow on shoulder -s 50 -S 1252923272 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512-transparent.png -A k_lms -f 0.75
-## 000016.2633891320.png
-
-
-old sea captain with crow on shoulder -s 50 -S 2633891320 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A plms -f 0.75
-## 000017.1134411920.png
-
-
-old sea captain with crow on shoulder -s 50 -S 1134411920 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A k_euler_a -f 0.75
-## 000018.47.png
-
-
-big red dog playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
-## 000019.47.png
-
-
-big red++++ dog playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
-## 000020.47.png
-
-
-big red dog playing with cat+++ -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
-## 000021.47.png
-
-
-big (red dog).swap(tiger) playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
-## 000022.47.png
-
-
-dog:1,cat:2 -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
-## 000023.47.png
-
-
-dog:2,cat:1 -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
-## 000024.1029061431.png
-
-
-medusa with cobras -s 50 -S 1029061431 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/curly.png -A k_lms -f 0.75 -tm hair
-## 000025.1284519352.png
-
-
-bearded man -s 50 -S 1284519352 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/curly.png -A k_lms -f 0.75 -tm face
-## curly.942491079.gfpgan.png
-
-
-!fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -G 0.8 -ft gfpgan -U 2.0 0.75
-## curly.942491079.outcrop.png
-
-
-!fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -c top 64
-## curly.942491079.outpaint.png
-
-
-!fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -D top 64
-## curly.942491079.outcrop-01.png
-
-
-!fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -c top 64
```
@ -1,29 +0,0 @@
outputs/preflight/000001.1863159593.png: banana sushi -s 50 -S 1863159593 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000002.1151955949.png: banana sushi -s 50 -S 1151955949 -W 512 -H 512 -C 7.5 -A plms
outputs/preflight/000003.2736230502.png: banana sushi -s 50 -S 2736230502 -W 512 -H 512 -C 7.5 -A ddim
outputs/preflight/000004.42.png: banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000005.42.png: banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000006.478163327.png: banana sushi -s 50 -S 478163327 -W 640 -H 448 -C 7.5 -A k_lms
outputs/preflight/000007.2407640369.png: banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 2407640369:0.1
outputs/preflight/000008.2772421987.png: banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 2772421987:0.1
outputs/preflight/000009.3532317557.png: banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 3532317557:0.1
outputs/preflight/000010.2028635318.png: banana sushi -s 50 -S 2028635318 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000011.1111168647.png: pond with waterlillies -s 50 -S 1111168647 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000012.1476370516.png: pond with waterlillies -s 50 -S 1476370516 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000013.4281108706.png: banana sushi -s 50 -S 4281108706 -W 960 -H 960 -C 7.5 -A k_lms
outputs/preflight/000014.2396987386.png: old sea captain with crow on shoulder -s 50 -S 2396987386 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A k_lms -f 0.75
outputs/preflight/000015.1252923272.png: old sea captain with crow on shoulder -s 50 -S 1252923272 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512-transparent.png -A k_lms -f 0.75
outputs/preflight/000016.2633891320.png: old sea captain with crow on shoulder -s 50 -S 2633891320 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A plms -f 0.75
outputs/preflight/000017.1134411920.png: old sea captain with crow on shoulder -s 50 -S 1134411920 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A k_euler_a -f 0.75
outputs/preflight/000018.47.png: big red dog playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000019.47.png: big red++++ dog playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000020.47.png: big red dog playing with cat+++ -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000021.47.png: big (red dog).swap(tiger) playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000022.47.png: dog:1,cat:2 -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000023.47.png: dog:2,cat:1 -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
outputs/preflight/000024.1029061431.png: medusa with cobras -s 50 -S 1029061431 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/curly.png -A k_lms -f 0.75 -tm hair
outputs/preflight/000025.1284519352.png: bearded man -s 50 -S 1284519352 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/curly.png -A k_lms -f 0.75 -tm face
outputs/preflight/curly.942491079.gfpgan.png: !fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -G 0.8 -ft gfpgan -U 2.0 0.75
outputs/preflight/curly.942491079.outcrop.png: !fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -c top 64
outputs/preflight/curly.942491079.outpaint.png: !fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -D top 64
outputs/preflight/curly.942491079.outcrop-01.png: !fix ./docs/assets/preflight-checks/inputs/curly.png -s 50 -S 942491079 -W 512 -H 512 -C 7.5 -A k_lms -c top 64
@ -1,61 +0,0 @@
# outputs/preflight/000001.1863159593.png
banana sushi -s 50 -S 1863159593 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000002.1151955949.png
banana sushi -s 50 -S 1151955949 -W 512 -H 512 -C 7.5 -A plms
# outputs/preflight/000003.2736230502.png
banana sushi -s 50 -S 2736230502 -W 512 -H 512 -C 7.5 -A ddim
# outputs/preflight/000004.42.png
banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000005.42.png
banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000006.478163327.png
banana sushi -s 50 -S 478163327 -W 640 -H 448 -C 7.5 -A k_lms
# outputs/preflight/000007.2407640369.png
banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 2407640369:0.1
# outputs/preflight/000007.2772421987.png
banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 2772421987:0.1
# outputs/preflight/000007.3532317557.png
banana sushi -s 50 -S 42 -W 512 -H 512 -C 7.5 -A k_lms -V 3532317557:0.1
# outputs/preflight/000008.2028635318.png
banana sushi -s 50 -S 2028635318 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000009.1111168647.png
pond with waterlillies -s 50 -S 1111168647 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000010.1476370516.png
pond with waterlillies -s 50 -S 1476370516 -W 512 -H 512 -C 7.5 -A k_lms --seamless
# outputs/preflight/000011.4281108706.png
banana sushi -s 50 -S 4281108706 -W 960 -H 960 -C 7.5 -A k_lms
# outputs/preflight/000012.2396987386.png
old sea captain with crow on shoulder -s 50 -S 2396987386 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A k_lms -f 0.75
# outputs/preflight/000013.1252923272.png
old sea captain with crow on shoulder -s 50 -S 1252923272 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512-transparent.png -A k_lms -f 0.75
# outputs/preflight/000014.2633891320.png
old sea captain with crow on shoulder -s 50 -S 2633891320 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A plms -f 0.75
# outputs/preflight/000015.1134411920.png
old sea captain with crow on shoulder -s 50 -S 1134411920 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/Lincoln-and-Parrot-512.png -A k_euler_a -f 0.75
# outputs/preflight/000016.42.png
big red dog playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000017.42.png
big red++++ dog playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000018.42.png
big red dog playing with cat+++ -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000019.42.png
big (red dog).swap(tiger) playing with cat -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000020.42.png
dog:1,cat:2 -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000021.42.png
dog:2,cat:1 -s 50 -S 47 -W 512 -H 512 -C 7.5 -A k_lms
# outputs/preflight/000022.1029061431.png
medusa with cobras -s 50 -S 1029061431 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/curly.png -A k_lms -f 0.75 -tm hair
# outputs/preflight/000023.1284519352.png
bearded man -s 50 -S 1284519352 -W 512 -H 512 -C 7.5 -I docs/assets/preflight-checks/inputs/curly.png -A k_lms -f 0.75 -tm face
# outputs/preflight/000024.curly.hair.deselected.png
!mask -I docs/assets/preflight-checks/inputs/curly.png -tm hair
# outputs/preflight/curly.942491079.gfpgan.png
!fix ./docs/assets/preflight-checks/inputs/curly.png -U2 -G0.8
# outputs/preflight/curly.942491079.outcrop.png
!fix ./docs/assets/preflight-checks/inputs/curly.png -c top 64
# outputs/preflight/curly.942491079.outpaint.png
!fix ./docs/assets/preflight-checks/inputs/curly.png -D top 64
# outputs/preflight/curly.942491079.outcrop-01.png
!switch inpainting-1.5
!fix ./docs/assets/preflight-checks/inputs/curly.png -c top 64
@ -1,7 +1,5 @@
---
title: CLI
---

# :material-bash: CLI

@ -93,6 +91,10 @@ overridden on a per-prompt basis (see

| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
| `--config <path>` | | `configs/models.yaml` | Configuration file for models and their weights. |
| `--iterations <int>` | `-n<int>` | `1` | How many images to generate per prompt. |
| `--width <int>` | `-W<int>` | `512` | Width of generated image |
| `--height <int>` | `-H<int>` | `512` | Height of generated image |
| `--steps <int>` | `-s<int>` | `50` | How many steps of refinement to apply |
| `--strength <float>` | `-f<float>` | `0.75` | For img2img: how hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely. |
| `--fit` | `-F` | `False` | For img2img: scale the init image to fit into the specified -H and -W dimensions |
| `--grid` | `-g` | `False` | Save all image series as a grid rather than individually. |
| `--sampler <sampler>` | `-A<sampler>` | `k_lms` | Sampler to use. Use `-h` to get list of available samplers. |
| `--seamless` | | `False` | Create interesting effects by tiling elements of the image. |

@ -108,7 +110,7 @@ overridden on a per-prompt basis (see

| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |

</div>
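To illustrate how these launch options combine (an ad-hoc example, not taken from the original docs, with arbitrary values), a typical invocation might look like:

```bash
python3 scripts/invoke.py --web --port 9090 --iterations 2 --sampler k_euler_a
```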
@ -121,6 +123,29 @@ overridden on a per-prompt basis (see

You can either double your slashes (ick): `C:\\path\\to\\my\\file`, or
use Linux/Mac style forward slashes (better): `C:/path/to/my/file`.

## The .invokeai initialization file

To start up invoke.py with your preferred settings, place your desired
startup options in a file in your home directory named `.invokeai`. The
file should contain the startup options as you would type them on the
command line (`--steps=10 --grid`), one argument per line, or a
mixture of both using any of the accepted command switch formats:

!!! example ""

    ```bash
    --web
    --steps=28
    --grid
    -f 0.6 -C 11.0 -A k_euler_a
    ```

Note that the initialization file only accepts command line arguments.
There are additional arguments that you can provide on the `invoke>` command
line (such as `-n` or `--iterations`) that cannot be entered into this file.
Also be alert for empty blank lines at the end of the file, which will cause
an arguments error at startup time.

## List of prompt arguments

After the invoke.py script initializes, it will present you with an `invoke>`
@ -6,10 +6,11 @@ title: Image-to-Image

## `img2img`

This script also provides an `img2img` feature that lets you seed your creations
with an initial drawing or photo. This is a really cool feature that tells
stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:

```commandline
tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
```
@ -18,63 +19,76 @@ tree on a hill with a river, nature photograph, national geographic -I./test-pic

This will take the original image shown here:

<figure markdown>
![](https://user-images.githubusercontent.com/50542132/193946000-c42a96d8-5a74-4f8a-b4c3-5213e6cadcce.png){ width=320 }
</figure>

and generate a new image based on it as shown here:

<figure markdown>
![](https://user-images.githubusercontent.com/111189/194135515-53d4c060-e994-4016-8121-7c685e281ac9.png){ width=320 }
</figure>

The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
(`-f`) controls how much the original will be modified, ranging from `0.0` (keep
the original intact), to `1.0` (ignore the original completely). The default is
`0.75`, and ranges from `0.25-0.90` give interesting results. Other relevant
options include `-C` (classifier-free guidance scale), and `-s` (steps). Unlike
`txt2img`, adding steps will continuously change the resulting image and it will
not converge.

You may also pass a `-v<variation_amount>` option to generate `-n<iterations>`
count variants on the original image. This is done by passing the first
generated image back into img2img the requested number of times. It generates
interesting variants.
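For example (an illustrative command assembled from the switches above; the sketch path is the one used earlier on this page), this requests four mild variants of an img2img result:

```bash
invoke> "tree on a hill with a river" -I./test-pictures/tree-and-river-sketch.png -f 0.75 -v0.1 -n4
```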
Note that the prompt makes a big difference. For example, this slight variation
on the prompt produces a very different image:

<figure markdown>
![](https://user-images.githubusercontent.com/111189/194135220-16b62181-b60c-4248-8989-4834a8fd7fbd.png){ width=320 }
<caption markdown>photograph of a tree on a hill with a river</caption>
</figure>

!!! tip

    When designing prompts, think about how the images scraped from the internet were
    captioned. Very few photographs will be labeled "photograph" or "photorealistic."
    They will, however, be captioned with the publication, photographer, camera model,
    or film settings.

If the initial image contains transparent regions, then Stable Diffusion will
only draw within the transparent regions, a process called
[`inpainting`](./INPAINTING.md#creating-transparent-regions-for-inpainting).
However, for this to work correctly, the color information underneath the
transparent regions needs to be preserved, not erased.

!!! warning "**IMPORTANT ISSUE**"

    `img2img` does not work properly on initial images smaller
    than 512x512. Please scale your image to at least 512x512 before using it.
    Larger images are not a problem, but may run out of VRAM on your GPU card. To
    fix this, use the `--fit` option, which downscales the initial image to fit
    within the box specified by width x height:

    ```
    tree on a hill with a river, national geographic -I./test-pictures/big-sketch.png -H512 -W512 --fit
    ```

## How does it actually work, though?

The main difference between `img2img` and `prompt2img` is the starting point.
While `prompt2img` always starts with pure gaussian noise and progressively
refines it over the requested number of steps, `img2img` skips some of these
earlier steps (how many it skips is indirectly controlled by the `--strength`
parameter), and uses instead your initial image mixed with gaussian noise as the
starting image.

**Let's start** by thinking about vanilla `prompt2img`, just generating an image
from a prompt. If the step count is 10, then the "latent space" (Stable
Diffusion's internal representation of the image) for the prompt "fire" with
seed `1592514025` develops something like this:

```bash
invoke> "fire" -s10 -W384 -H384 -S1592514025
```
@ -82,9 +96,16 @@ invoke> "fire" -s10 -W384 -H384 -S1592514025

</figure>

Put simply: starting from a frame of fuzz/static, SD finds details in each frame
that it thinks look like "fire" and brings them a little bit more into focus,
gradually scrubbing out the fuzz until a clear image remains.

**When you use `img2img`** some of the earlier steps are cut, and instead an
initial image of your choice is used. But because of how the maths behind Stable
Diffusion works, this image needs to be mixed with just the right amount of
noise (fuzz/static) for where it is being inserted. This is where the strength
parameter comes in. Depending on the set strength, your image will be inserted
into the sequence at the appropriate point, with just the right amount of noise.

### A concrete example
@ -94,7 +115,9 @@ I want SD to draw a fire based on this hand-drawn image:

</figure>

Let's only do 10 steps, to make it easier to see what's happening. If strength
is `0.7`, this is what the internal steps the algorithm has to take will look
like:

<figure markdown>
@ -106,33 +129,49 @@ With strength `0.4`, the steps look more like this:

</figure>

Notice how much more fuzzy the starting image is for strength `0.7` compared to
`0.4`, and notice also how much longer the sequence is with `0.7`:

|                             | strength = 0.7 | strength = 0.4 |
| --------------------------- | -------------- | -------------- |
| initial image that SD sees  |                |                |
| steps argument to `invoke>` | `-s10`         | `-s10`         |
| steps actually taken        | `7`            | `4`            |
| latent space at each step   |                |                |
| output                      |                |                |

Both of the outputs look kind of like what I was thinking of. With the strength
higher, my input becomes more vague, _and_ Stable Diffusion has more steps to
refine its output. But it's not really making what I want, which is a picture of
a cheery open fire. With the strength lower, my input is more clear, _but_ Stable
Diffusion has less chance to refine itself, so the result ends up inheriting all
the problems of my bad drawing.

If you want to try this out yourself, all of these are using a seed of
`1592514025` with a width/height of `384`, step count `10`, the default sampler
(`k_lms`), and the single-word prompt `"fire"`:

```bash
invoke> "fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png --strength 0.7
```

The code for rendering intermediates is on my (damian0815's) branch
[document-img2img](https://github.com/damian0815/InvokeAI/tree/document-img2img) -
run `invoke.py` and check your `outputs/img-samples/intermediates` folder while
generating an image.

### Compensating for the reduced step count

After putting this guide together I was curious to see what the difference would
be if I increased the step count to compensate, so that SD could have the same
number of steps to develop the image regardless of the strength. So I ran the
generation again using the same seed, but this time adapting the step count to
give each generation 20 steps.
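To see the arithmetic in one place, here is a small Python sketch of the relationship described in this guide (an approximation: the exact step the sampler resumes from is an implementation detail):

```python
import math

def steps_actually_taken(requested_steps: int, strength: float) -> int:
    """img2img skips the earliest steps: roughly strength * requested."""
    return round(strength * requested_steps)

def compensated_step_count(target_image_steps: int, strength: float) -> int:
    """The -s value to request so that img2img still runs
    target_image_steps denoising steps on top of the initial image."""
    return math.ceil(target_image_steps / strength)

# The numbers used in this guide:
assert steps_actually_taken(10, 0.7) == 7
assert steps_actually_taken(10, 0.4) == 4
assert compensated_step_count(20, 0.4) == 50  # hence -s50 at strength 0.4
assert compensated_step_count(20, 0.7) == 29  # the guide rounds up to -s30
```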
Here's strength `0.4` (note step count `50`, which is `20 ÷ 0.4` to make sure SD
does `20` steps from my image):

```bash
invoke> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
```
@ -140,7 +179,8 @@ invoke> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4

</figure>

and here is strength `0.7` (note step count `30`, which is roughly `20 ÷ 0.7` to
make sure SD does `20` steps from my image):

```commandline
invoke> "fire" -s30 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7
```
@ -150,7 +190,11 @@ invoke> "fire" -s30 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7

</figure>

In both cases the image is nice and clean and "finished", but because at
strength `0.7` Stable Diffusion has been given so much more freedom to improve on
my badly-drawn flames, they've come out looking much better. You can really see
the difference when looking at the latent steps. There's more noise on the first
image with strength `0.7`:

<figure markdown>
@ -162,15 +206,19 @@ than there is for strength `0.4`:

</figure>

and that extra noise gives the algorithm more choices when it is evaluating how
to denoise any particular pixel in the image.

Unfortunately, it seems that `img2img` is very sensitive to the step count.
Here's strength `0.7` with a step count of `29` (SD did 19 steps from my image):

<figure markdown>

</figure>

By comparing the latents we can sort of see that something got interpreted
differently enough on the third or fourth step to lead to a rather different
interpretation of the flames.

<figure markdown>
@ -180,4 +228,9 @@ By comparing the latents we can sort of see that something got interpreted diffe

</figure>

This is the result of a difference in the de-noising "schedule" - basically the
noise has to be cleaned by a certain degree each step or the model won't
"converge" on the image properly (see
[stable diffusion blog](https://huggingface.co/blog/stable_diffusion) for more
about that). A different step count means a different schedule, which means
things get interpreted slightly differently at every step.
@ -6,61 +6,63 @@ title: Outpainting

## Outpainting and outcropping

Outpainting is a process by which the AI generates parts of the image that are
outside its original frame. It can be used to fix up images in which the subject
is off center, or when some detail (often the top of someone's head!) is cut
off.

InvokeAI supports two versions of outpainting, one called "outpaint" and the
other "outcrop." They work slightly differently and each has its advantages and
drawbacks.

### Outpainting

Outpainting is the same as inpainting, except that the painting occurs in the
regions outside of the original image. To outpaint using the `invoke.py` command
line script, prepare an image in which the borders to be extended are pure
black. Add an alpha channel (if there isn't one already), and make the borders
completely transparent and the interior completely opaque. If you wish to modify
the interior as well, you may create transparent holes in the transparency
layer, which `img2img` will paint into as usual.

Pass the image as the argument to the `-I` switch as you would for regular
inpainting:

```bash
invoke> a stream by a river -I /path/to/transparent_img.png
```

You'll likely be delighted by the results.

### Tips

1. Do not try to expand the image too much at once. Generally it is best to
   expand the margins in 64-pixel increments. 128 pixels often works, but your
   mileage may vary depending on the nature of the image you are trying to
   outpaint into.

2. There are a series of switches that can be used to adjust how the inpainting
   algorithm operates. In particular, you can use these to minimize the seam
   that sometimes appears between the original image and the extended part.
   These switches are:

    | switch                     | default | description                                                             |
    | -------------------------- | ------- | ----------------------------------------------------------------------- |
    | `--seam_size SEAM_SIZE`    | `0`     | Size of the mask around the seam between original and outpainted image |
    | `--seam_blur SEAM_BLUR`    | `0`     | The amount to blur the seam inwards                                     |
    | `--seam_strength STRENGTH` | `0.7`   | The img2img strength to use when filling the seam                       |
    | `--seam_steps SEAM_STEPS`  | `10`    | The number of steps to use to fill the seam.                            |
    | `--tile_size TILE_SIZE`    | `32`    | The tile size to use for filling outpaint areas                         |
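For example (an illustrative combination of the switches from the table above, with made-up values), you might soften a visible seam like this:

```bash
invoke> a stream by a river -I /path/to/transparent_img.png --seam_size 16 --seam_blur 8 --seam_steps 20
```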
### Outcrop

The `outcrop` extension gives you a convenient `!fix` postprocessing command
that allows you to extend a previously-generated image in 64 pixel increments in
any direction. You can apply the module to any image previously-generated by
InvokeAI. Note that it works with arbitrary PNG photographs, but not currently
with JPG or other formats. Outcropping is particularly effective when combined
with the
[runwayML custom inpainting model](INPAINTING.md#using-the-runwayml-inpainting-model).

Consider this image:
@ -68,18 +70,17 @@ Consider this image:

</figure>

Pretty nice, but it's annoying that the top of her head is cut off. She's also a
bit off center. Let's fix that!

```bash
invoke> !fix images/curly.png --outcrop top 128 right 64 bottom 64
```

This is saying to apply the `outcrop` extension by extending the top of the
image by 128 pixels, and the right and bottom of the image by 64 pixels. You can
use any combination of top|left|right|bottom, and specify any number of pixels
to extend. You can also abbreviate `--outcrop` to `-c`.

The result looks like this:
@ -87,39 +88,53 @@ The result looks like this:

</figure>

The new image is larger than the original (576x704) because 128 pixels were
added to the top and 64 pixels to the right and bottom sides. You will need
enough VRAM to process an image of this size.

#### Outcropping non-InvokeAI images

You can outcrop an arbitrary image that was not generated by InvokeAI,
but your results will vary. The `inpainting-1.5` model is highly
recommended, but if not feasible, then you may be able to improve the
output by conditioning the outcropping with a text prompt that
describes the scene using the `--new_prompt` argument:

```bash
invoke> !fix images/vacation.png --outcrop top 128 --new_prompt "family vacation"
```

You may also provide a different seed for outcropping to use by passing
`-S<seed>`. A negative seed will generate a new random seed.

A number of caveats:

1. Although you can specify any pixel values, they will be rounded up to the
   nearest multiple of 64. Smaller values are better. Larger extensions are more
   likely to generate artefacts. However, if you wish you can run the `!fix`
   command repeatedly to cautiously expand the image.

2. The extension is stochastic, meaning that each time you run it you'll get a
   slightly different result. You can run it repeatedly until you get an image
   you like. Unfortunately `!fix` does not currently respect the `-n`
   (`--iterations`) argument.

3. Your results will be _much_ better if you use the `inpaint-1.5` model
   released by runwayML and installed by default by `scripts/preload_models.py`.
   This model was trained specifically to harmoniously fill in image gaps. The
   standard model will work as well, but you may notice color discontinuities at
   the border.

4. When using the `inpaint-1.5` model, you may notice subtle changes to the area
   outside the masked region. This is because the model performs an
   encoding/decoding on the image as a whole. This does not occur with the
   standard model.

## Outpaint

The `outpaint` extension does the same thing, but with subtle differences.
Starting with the same image, here is how we would add an additional 64 pixels
to the top of the image:

```bash
invoke> !fix images/curly.png --out_direction top 64
```
@ -138,15 +153,15 @@ outcropping:

- You can only specify one direction to extend at a time.
- The image is **not** resized. Instead, the image is shifted by the specified
  number of pixels. If you look carefully, you'll see that less of the lady's
  torso is visible in the image.
- Because the image dimensions remain the same, there's no rounding to multiples
  of 64.
- Attempting to outpaint larger areas will frequently give rise to ugly ghosting
  effects.
- For best results, try increasing the step number.
- If you don't specify a pixel value in `-D`, it will default to half of the
  whole image, which is likely not what you want.

!!! tip
@ -19,14 +19,14 @@ applied after the fact.

The default face restoration module is GFPGAN. The default upscale is
Real-ESRGAN. For an alternative face restoration module, see
[CodeFormer Support](#codeformer-support) below.

As of version 1.14, environment.yaml will install the Real-ESRGAN
package into the standard install location for python packages, and
will put GFPGAN into a subdirectory of "src" in the InvokeAI
directory. Upscaling with Real-ESRGAN should "just work" without
further intervention. Simply pass the `--upscale` (`-U`) option on the
`invoke>` command line, or indicate the desired scale on the popup in
the Web GUI.
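For example (prompt and values are illustrative), this generates an image, upscales it 2x with Real-ESRGAN, and applies GFPGAN face restoration at 0.8 strength in one pass:

```bash
invoke> "portrait of an old sea captain" -s 50 -S 42 -U 2 -G 0.8
```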
**GFPGAN** requires a series of downloadable model files to
@ -6,14 +6,15 @@ title: Prompting-Features

## **Reading Prompts from a File**

You can automate `invoke.py` by providing a text file with the prompts you want
to run, one line per prompt. The text file must be composed with a text editor
(e.g. Notepad) and not a word processor. Each line should look like what you
would type at the `invoke>` prompt:

```bash
"a beautiful sunny day in the park, children playing" -n4 -C10
"stormy weather on a mountain top, goats grazing" -s100
"innovative packaging for a squid's dinner" -S137038382
```

Then pass this file's name to `invoke.py` when you invoke it:
@ -22,7 +23,8 @@ Then pass this file's name to `invoke.py` when you invoke it:

(invokeai) ~/stable-diffusion$ python3 scripts/invoke.py --from_file "path/to/prompts.txt"
```

You may read a series of prompts from standard input by providing a filename of
`-`:

```bash
(invokeai) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/invoke.py --from_file -
```
@ -32,14 +34,15 @@ You may read a series of prompts from standard input by providing a filename of

## **Negative and Unconditioned Prompts**

Any words between a pair of square brackets will instruct Stable Diffusion to
attempt to ban the concept from the generated image.

```text
this is a test prompt [not really] to make you understand [cool] how this works.
```

In the above statement, the words `not really cool` will be ignored by Stable
Diffusion.

Here's a prompt that demonstrates what it does.
@ -51,7 +54,9 @@ original prompt:

</figure>

That image has a woman, so if we want the horse without a rider, we can
influence the image not to have a woman by putting `[woman]` in the prompt, like
this:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
@ -59,7 +64,8 @@ That image has a woman, so if we want the horse without a rider, we can influenc

</figure>

That's nice - but say we also don't want the image to be quite so blue. We can
add "blue" to the list of negative prompts, so it's now `[woman blue]`:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
@ -67,7 +73,8 @@ That's nice - but say we also don't want the image to be quite so blue. We can a

</figure>

Getting close - but there's no sense in having a saddle when our horse doesn't
have a rider, so we'll add one more negative prompt: `[woman blue saddle]`.

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
@ -88,91 +95,128 @@ Getting close - but there's no sense in having a saddle when our horse doesn't h

The InvokeAI prompting language has the following features:

### Attention weighting

Append a word or phrase with `-` or `+`, or a weight between `0` and `2`
(`1`=default), to decrease or increase "attention" (= a mix of per-token CFG
weighting multiplier and, for `-`, a weighted blend with the prompt without the
term).

The following syntax is recognised:

- single words without parentheses: `a tall thin man picking apricots+`
- single or multiple words with parentheses:
  `a tall thin man picking (apricots)+` `a tall thin man picking (apricots)-`
  `a tall thin man (picking apricots)+` `a tall thin man (picking apricots)-`
- more effect with more symbols: `a tall thin man (picking apricots)++`
- nesting: `a tall thin man (picking apricots+)++` (`apricots` effectively gets
  `+++`)
- all of the above with explicit numbers: `a tall thin man picking (apricots)1.1`
  `a tall thin man (picking (apricots)1.3)1.1`. (`+` is equivalent to 1.1, `++`
  is pow(1.1,2), `+++` is pow(1.1,3), etc; `-` means 0.9, `--` means pow(0.9,2),
  etc.)
- attention also applies to `[unconditioning]`, so
  `a tall thin man picking apricots [(ladder)0.01]` will _very gently_ nudge SD
  away from trying to draw the man on a ladder
|
|
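For reference, the `+`/`-` arithmetic above is easy to express in a few lines of Python. This is an illustrative sketch of the documented mapping only, not InvokeAI's actual parser:

```python
def attention_weight(symbols: str) -> float:
    """Map a run of '+' or '-' symbols to an attention multiplier.

    Each '+' compounds the weight by 1.1 and each '-' by 0.9, matching the
    pow() arithmetic described in the list above.
    """
    if not symbols:
        return 1.0  # no symbols: default weight of 1
    base = 1.1 if symbols[0] == "+" else 0.9
    return base ** len(symbols)

print(attention_weight("++"))   # ~1.21, i.e. pow(1.1, 2)
print(attention_weight("---"))  # ~0.729, i.e. pow(0.9, 3)
# Nested weights multiply: `(picking (apricots)1.3)1.1` weights apricots at
print(1.3 * 1.1)                # ~1.43
```
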
You can use this to increase or decrease the amount of something. Starting from this prompt of `a man picking apricots from a tree`, let's see what happens if we increase and decrease how much attention we want Stable Diffusion to pay to the word `apricots`:


|

|
||||||
|
|
||||||
Using `-` to reduce apricot-ness:

| `a man picking apricots- from a tree` | `a man picking apricots-- from a tree` | `a man picking apricots--- from a tree` |
| --- | --- | --- |
|  |  |  |

Using `+` to increase apricot-ness:

| `a man picking apricots+ from a tree` | `a man picking apricots++ from a tree` | `a man picking apricots+++ from a tree` | `a man picking apricots++++ from a tree` | `a man picking apricots+++++ from a tree` |
| --- | --- | --- | --- | --- |
|  |  |  |  |  |

You can also change the balance between different parts of a prompt. For example, below is a `mountain man`:



And here he is with more mountain:

| `mountain+ man` | `mountain++ man` | `mountain+++ man` |
| --- | --- | --- |
|  |  |  |

Or, alternatively, with more man:

| `mountain man+` | `mountain man++` | `mountain man+++` | `mountain man++++` |
| --- | --- | --- | --- |
|  |  |  |  |

### Blending between prompts

- `("a tall thin man picking apricots", "a tall thin man picking pears").blend(1,1)`
- The existing prompt blending using `:<weight>` will continue to be supported - `("a tall thin man picking apricots", "a tall thin man picking pears").blend(1,1)` is equivalent to `a tall thin man picking apricots:1 a tall thin man picking pears:1` in the old syntax.
- Attention weights can be nested inside blends.
- Non-normalized blends are supported by passing `no_normalize` as an additional argument to the blend weights, e.g. `("a tall thin man picking apricots", "a tall thin man picking pears").blend(1,-1,no_normalize)`. This is very fun for exploring local maxima in the feature space, but it is also easy to produce garbage output (see the sketch below).

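To build intuition, here is a sketch of what weight normalization means for `.blend()`. This is illustrative arithmetic only, not InvokeAI's implementation:

```python
def blend_weights(weights, no_normalize=False):
    """Scale blend weights so they sum to 1, unless no_normalize is requested."""
    if no_normalize:
        return list(weights)
    total = sum(weights)
    return [w / total for w in weights]

print(blend_weights([1, 1]))                      # [0.5, 0.5]
print(blend_weights([1, -1], no_normalize=True))  # [1, -1]
# A blend like (1, -1) *must* skip normalization: the weights sum to zero,
# and negative weights are exactly the local-maxima exploration noted above.
```
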
See the section below on "Prompt Blending" for more information about how this works.

### Cross-Attention Control ('prompt2prompt')

Sometimes an image you generate is almost right, and you just want to change one detail without affecting the rest. You could use a photo editor and inpainting to overpaint the area, but that's a pain. Here's where `prompt2prompt` comes in handy.

Generate an image with a given prompt, record the seed of the image, and then use the `prompt2prompt` syntax to substitute words in the original prompt for words in a new prompt. This works for `img2img` as well.

* `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
|
- `a ("fluffy cat").swap("smiling dog") eating a hotdog`.
|
||||||
* quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.
|
- quotes optional: `a (fluffy cat).swap(smiling dog) eating a hotdog`.
|
||||||
* for single word substitutions parentheses are also optional: `a cat.swap(dog) eating a hotdog`.
|
- for single word substitutions parentheses are also optional:
|
||||||
* Supports options `s_start`, `s_end`, `t_start`, `t_end` (each 0-1) loosely corresponding to bloc97's `prompt_edit_spatial_start/_end` and `prompt_edit_tokens_start/_end` but with the math swapped to make it easier to intuitively understand.
|
`a cat.swap(dog) eating a hotdog`.
|
||||||
* Example usage:`a (cat).swap(dog, s_end=0.3) eating a hotdog` - the `s_end` argument means that the "spatial" (self-attention) edit will stop having any effect after 30% (=0.3) of the steps have been done, leaving Stable Diffusion with 70% of the steps where it is free to decide for itself how to reshape the cat-form into a dog form.
|
- Supports options `s_start`, `s_end`, `t_start`, `t_end` (each 0-1) loosely
|
||||||
* The numbers represent a percentage through the step sequence where the edits should happen. 0 means the start (noisy starting image), 1 is the end (final image).
|
corresponding to bloc97's `prompt_edit_spatial_start/_end` and
|
||||||
* For img2img, the step sequence does not start at 0 but instead at (1-strength) - so if strength is 0.7, s_start and s_end must both be greater than 0.3 (1-0.7) to have any effect.
|
`prompt_edit_tokens_start/_end` but with the math swapped to make it easier to
|
||||||
* Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable Diffusion should have to change the shape of the subject being swapped.
|
intuitively understand.
|
||||||
* `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.
|
- Example usage:`a (cat).swap(dog, s_end=0.3) eating a hotdog` - the `s_end`
|
||||||
|
argument means that the "spatial" (self-attention) edit will stop having any
|
||||||
|
effect after 30% (=0.3) of the steps have been done, leaving Stable
|
||||||
|
Diffusion with 70% of the steps where it is free to decide for itself how to
|
||||||
|
reshape the cat-form into a dog form.
|
||||||
|
- The numbers represent a percentage through the step sequence where the edits
|
||||||
|
should happen. 0 means the start (noisy starting image), 1 is the end (final
|
||||||
|
image).
|
||||||
|
- For img2img, the step sequence does not start at 0 but instead at
|
||||||
|
(1-strength) - so if strength is 0.7, s_start and s_end must both be
|
||||||
|
greater than 0.3 (1-0.7) to have any effect.
|
||||||
|
- Convenience option `shape_freedom` (0-1) to specify how much "freedom" Stable
|
||||||
|
Diffusion should have to change the shape of the subject being swapped.
|
||||||
|
- `a (cat).swap(dog, shape_freedom=0.5) eating a hotdog`.
|
||||||
|
|
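To make the img2img timing rule concrete, here is a small sketch of the clipping arithmetic described above. It is illustrative only; the function name and structure are assumptions, not InvokeAI's internal API:

```python
def active_edit_window(s_start: float, s_end: float, strength: float = 1.0):
    """Return the slice of the step sequence where a .swap() edit applies.

    For img2img the sequence begins at (1 - strength) rather than 0, so any
    part of [s_start, s_end] below that point is clipped away and has no
    effect.
    """
    sequence_start = 1.0 - strength
    lo, hi = max(s_start, sequence_start), min(s_end, 1.0)
    return (lo, hi) if lo < hi else None

print(active_edit_window(0.0, 0.3, strength=0.7))  # None: the window ends
                                                   # right where img2img begins
print(active_edit_window(0.0, 0.5, strength=0.7))  # ~(0.3, 0.5)
```
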
The `prompt2prompt` code is based off [bloc97's colab](https://github.com/bloc97/CrossAttentionControl).

Note that `prompt2prompt` is not currently working with the runwayML inpainting model, and may never work due to the way this model is set up. If you attempt to use `prompt2prompt` you will get the original image back. However, since this model is so good at inpainting, a good substitute is to use the `clipseg` text masking option:

```bash
invoke> a fluffy cat eating a hotdog
Outputs:
[1010] outputs/000025.2182095108.png: a fluffy cat eating a hotdog
invoke> a smiling dog eating a hotdog -I 000025.2182095108.png -tm cat
```

### Escaping parentheses () and speech marks ""

If the model you are using has parentheses () or speech marks "" as part of its syntax, you will need to "escape" these using a backslash, so that `(my_keyword)` becomes `\(my_keyword\)`. Otherwise, the prompt parser will attempt to interpret the parentheses as part of the prompt syntax and it will get confused.

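As a minimal illustration (using the hypothetical `my_keyword` token from above, with plain Python strings just to show the characters involved):

```python
# The parser would treat these parentheses as an attention group:
unescaped = "a portrait in (my_keyword) style"
# Backslash-escaping tells it they are literal characters instead.
# (The doubled backslashes are only Python string escaping.)
escaped = "a portrait in \\(my_keyword\\) style"
print(escaped)  # a portrait in \(my_keyword\) style
```
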
---

## **Prompt Blending**

You may blend together different sections of the prompt to explore the AI's latent semantic space and generate interesting (and often surprising!) variations. The syntax is:

```bash
blue sphere:0.25 red cube:0.75 hybrid
```

This will tell the sampler to blend 25% of the concept of a blue sphere with 75% of the concept of a red cube. The blend weights can use any combination of integers and floating point numbers, and they do not need to add up to 1. Everything to the left of the `:XX` up to the previous `:XX` is used for merging, so the overall effect is:

```bash
0.25 * "blue sphere" + 0.75 * "red cube" + hybrid
```

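To see how a blended prompt is carved into weighted segments, here is a rough sketch of the `:weight` splitting rule described above. The regex and the implicit trailing weight of 1.0 are simplifications for illustration, not the real parser:

```python
import re

def split_blend(prompt: str):
    """Split 'text:weight text:weight ... tail' into (text, weight) pairs.

    Everything to the left of a ':XX', back to the previous ':XX', forms one
    blend segment; a trailing unweighted chunk such as 'hybrid' is kept with
    an assumed weight of 1.0.
    """
    pairs = [(t.strip(), float(w))
             for t, w in re.findall(r"(.+?):([\d.]+)\s*", prompt)]
    tail = re.sub(r".*:[\d.]+\s*", "", prompt).strip()
    if tail:
        pairs.append((tail, 1.0))
    return pairs

print(split_blend("blue sphere:0.25 red cube:0.75 hybrid"))
# [('blue sphere', 0.25), ('red cube', 0.75), ('hybrid', 1.0)]
```
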
Because you are exploring the "mind" of the AI, the AI's way of mixing two concepts may not match yours, leading to surprising effects. To illustrate, here are several images generated using various combinations of blend weights. As usual, unless you fix the seed, the prompts will give you different results each time you run them.

---

<figure markdown>

### "blue sphere, red cube, hybrid"

</figure>

This example doesn't use melding at all and represents the default way of mixing concepts.

<figure markdown>

</figure>

It's interesting to see how the AI expressed the concept of "cube" as the four quadrants of the enclosing frame. If you look closely, there is depth there, so the enclosing frame is actually a cube.

<figure markdown>

### "blue sphere:0.25 red cube:0.75 hybrid"



</figure>

Now that's interesting. We get neither a blue sphere nor a red cube, but a red sphere embedded in a brick wall, which represents a melding of concepts within the AI's "latent space" of semantic representations. Where is Ludwig Wittgenstein when you need him?

<figure markdown>

### "blue sphere:0.75 red cube:0.25 hybrid"



</figure>

Definitely more blue-spherey. The cube is gone entirely, but it's really cool abstract art.

<figure markdown>

### "blue sphere:0.5 red cube:0.5 hybrid"



</figure>

Whoa...! I see blue and red, but no spheres or cubes. Is the word "hybrid" summoning up the concept of some sort of scifi creature? Let's find out.

<figure markdown>

### "blue sphere:0.5 red cube:0.5"



</figure>

Indeed, removing the word "hybrid" produces an image that is more like what we'd expect.

In conclusion, prompt blending is great for exploring creative space, but can be difficult to direct. A forthcoming release of InvokeAI will feature more deterministic prompt weighting.

2. Given two or more variations that you like, you can combine them in a weighted fashion.

!!! Information ""

    This cheat sheet provides a quick guide for how this works in practice, using variations to create the desired image of Xena, Warrior Princess.

## Step 1 -- Find a base image that you like

# :material-web: InvokeAI Web Server

As of version 2.0.0, this distribution comes with a full-featured web server (see screenshot). To use it, run the `invoke.py` script by adding the `--web` option:

```bash
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web
```

You can then connect to the server by pointing your web browser at http://localhost:9090. To reach the server from a different machine on your LAN, you may launch the web server with the `--host` argument and either the IP address of the host you are running it on, or the wildcard `0.0.0.0`. For example:

```bash
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0
```

## Quick guided walkthrough of the WebGUI's features

While most of the WebGUI's features are intuitive, here is a guided walkthrough of its various components.

{:width="640px"}
|
{:width="640px"}
|
||||||
|
|
||||||
The screenshot above shows the Text to Image tab of the WebGUI. There are three main sections:

1. A **control panel** on the left, which contains various settings for text to image generation. The most important part is the text field (currently showing `strawberry sushi`) for entering the text prompt, and the camera icon directly underneath that will render the image. We'll call this the _Invoke_ button from now on.

2. The **current image** section in the middle, which shows a large format version of the image you are currently working on. A series of buttons at the top ("image to image", "Use All", "Use Seed", etc) lets you modify the image in various ways.

3. A **gallery** section on the right that contains a history of the images you have generated. These images are read and written to the directory specified at launch time in `--outdir`.

In addition to these three elements, there are a series of icons for changing global settings, reporting bugs, and changing the theme on the upper right.

There are also a series of icons to the left of the control panel (see highlighted area in the screenshot below) which select among a series of tabs for performing different types of operations.

<figure markdown>
{:width="512px"}
</figure>

From top to bottom, these are:

1. Text to Image - generate images from text
2. Image to Image - from an uploaded starting image (drawing or photograph) generate a new one, modified by the text prompt
3. Inpainting (pending) - Interactively erase portions of a starting image and have the AI fill in the erased region from a text prompt.
4. Outpainting (pending) - Interactively add blank space to the borders of a starting image and fill in the background from a text prompt.
5. Postprocessing (pending) - Interactively postprocess generated images using a variety of filters.

||||||
The inpainting, outpainting and postprocessing tabs are currently in
|
The inpainting, outpainting and postprocessing tabs are currently in
|
||||||
development. However, limited versions of their features can already
|
development. However, limited versions of their features can already be accessed
|
||||||
be accessed through the Text to Image and Image to Image tabs.
|
through the Text to Image and Image to Image tabs.
|
||||||
|
|
||||||
## Walkthrough

The following walkthrough will exercise most (but not all) of the WebGUI's feature set.

### Text to Image

1. Launch the WebGUI using `python scripts/invoke.py --web` and connect to it with your browser by accessing `http://localhost:9090`. If the browser and server are running on different machines on your LAN, add the option `--host 0.0.0.0` to the launch command line and connect to the machine hosting the web server using its IP address or domain name.

2. If all goes well, the WebGUI should come up and you'll see a green `connected` message on the upper right.

#### Basics

1. Generate an image by typing _strawberry sushi_ into the large prompt field on the upper left and then clicking on the Invoke button (the one with the Camera icon). After a short wait, you'll see a large image of sushi in the image panel, and a new thumbnail in the gallery on the right.

   If you need more room on the screen, you can turn the gallery off by clicking on the **x** to the right of "Your Invocations". You can turn it back on later by clicking the image icon that appears in the gallery's place.

   The images are written into the directory indicated by the `--outdir` option provided at script launch time. By default, this is `outputs/img-samples` under the InvokeAI directory.

2. Generate a bunch of strawberry sushi images by increasing the number of requested images by adjusting the Images counter just below the Camera button. As each is generated, it will be added to the gallery. You can switch the active image by clicking on the gallery thumbnails.

|
|
||||||
3. Try playing with different settings, including image width and
|
3. Try playing with different settings, including image width and height, the
|
||||||
height, the Sampler, the Steps and the CFG scale.
|
Sampler, the Steps and the CFG scale.
|
||||||
|
|
||||||
Image *Width* and *Height* do what you'd expect. However, be aware that
|
Image _Width_ and _Height_ do what you'd expect. However, be aware that
|
||||||
larger images consume more VRAM memory and take longer to generate.
|
larger images consume more VRAM memory and take longer to generate.
|
||||||
|
|
||||||
The *Sampler* controls how the AI selects the image to display. Some
|
The _Sampler_ controls how the AI selects the image to display. Some
|
||||||
samplers are more "creative" than others and will produce a wider
|
samplers are more "creative" than others and will produce a wider range of
|
||||||
range of variations (see next section). Some samplers run faster than
|
variations (see next section). Some samplers run faster than others.
|
||||||
others.
|
|
||||||
|
|
||||||
*Steps* controls how many noising/denoising/sampling steps the AI will
|
_Steps_ controls how many noising/denoising/sampling steps the AI will take.
|
||||||
take. The higher this value, the more refined the image will be, but
|
The higher this value, the more refined the image will be, but the longer
|
||||||
the longer the image will take to generate. A typical strategy is to
|
the image will take to generate. A typical strategy is to generate images
|
||||||
generate images with a low number of steps in order to select one to
|
with a low number of steps in order to select one to work on further, and
|
||||||
work on further, and then regenerate it using a higher number of
|
then regenerate it using a higher number of steps.
|
||||||
steps.
|
|
||||||
|
|
||||||
The *CFG Scale* controls how hard the AI tries to match the generated
|
The _CFG Scale_ controls how hard the AI tries to match the generated image
|
||||||
image to the input prompt. You can go as high or low as you like, but
|
to the input prompt. You can go as high or low as you like, but generally
|
||||||
generally values greater than 20 won't improve things much, and values
|
values greater than 20 won't improve things much, and values lower than 5
|
||||||
lower than 5 will produce unexpected images. There are complex
|
will produce unexpected images. There are complex interactions between
|
||||||
interactions between *Steps*, *CFG Scale* and the *Sampler*, so
|
_Steps_, _CFG Scale_ and the _Sampler_, so experiment to find out what works
|
||||||
experiment to find out what works for you.
|
for you.
|
||||||
|
|
||||||
4. To regenerate a previously-generated image, select the image you want and click _Use All_. This loads the text prompt and other original settings into the control panel. If you then press _Invoke_ it will regenerate the image exactly. You can also selectively modify the prompt or other settings to tweak the image.

   Alternatively, you may click on _Use Seed_ to load just the image's seed, and leave other settings unchanged.

5. To regenerate a Stable Diffusion image that was generated by another SD package, you need to know its text prompt and its _Seed_. Copy-paste the prompt into the prompt box, unset the _Randomize Seed_ control in the control panel, and copy-paste the desired _Seed_ into its text field. When you Invoke, you will get something similar to the original image. It will not be exact unless you also set the correct values for the original sampler, CFG, steps and dimensions, but it will (usually) be close.

#### Variations on a theme

1. Let's try generating some variations. Select your favorite sushi image from the gallery to load it. Then select "Use All" from the list of buttons above. This will load up all the settings used to generate this image, including its unique seed.

   Go down to the Variations section of the Control Panel and set the button to On. Set Variation Amount to 0.2 to generate a modest number of variations on the image, and also set the Image counter to `4`. Press the `invoke` button. This will generate a series of related images. To obtain smaller variations, just lower the Variation Amount. You may also experiment with changing the Sampler. Some samplers generate more variability than others. `k_euler_a` is particularly creative, while `ddim` is pretty conservative.

2. For even more variations, experiment with increasing the setting for _Perlin_. This adds a bit of noise to the image generation process. Note that values of Perlin noise greater than 0.15 produce poor images for several of the samplers.

#### Facial reconstruction and upscaling

Stable Diffusion frequently produces mangled faces, particularly when there are multiple figures in the same scene. Stable Diffusion has particular issues with generating realistic eyes. InvokeAI provides the ability to reconstruct faces using either the GFPGAN or CodeFormer libraries. For more information see [POSTPROCESS](POSTPROCESS.md).

1. Invoke a prompt that generates a mangled face. A prompt that often gives this is "portrait of a lawyer, 3/4 shot" (this is not intended as a slur against lawyers!) Once you have an image that needs some touching up, load it into the Image panel, and press the button with the face icon (highlighted in the first screenshot below). A dialog box will appear. Leave _Strength_ at 0.8 and press _Restore Faces_. If all goes well, the eyes and other aspects of the face will be improved (see the second screenshot).

   

   

   The facial reconstruction _Strength_ field adjusts how aggressively the face library will try to alter the face. It can be as high as 1.0, but be aware that this often softens the face in an airbrush style, losing some details. The default 0.8 is usually sufficient.

2. "Upscaling" is the process of increasing the size of an image while
|
2. "Upscaling" is the process of increasing the size of an image while
|
||||||
retaining the sharpness. InvokeAI uses an external library called
|
retaining the sharpness. InvokeAI uses an external library called "ESRGAN"
|
||||||
"ESRGAN" to do this. To invoke upscaling, simply select an image and
|
to do this. To invoke upscaling, simply select an image and press the _HD_
|
||||||
press the *HD* button above it. You can select between 2X and 4X
|
button above it. You can select between 2X and 4X upscaling, and adjust the
|
||||||
upscaling, and adjust the upscaling strength, which has much the same
|
upscaling strength, which has much the same meaning as in facial
|
||||||
meaning as in facial reconstruction. Try running this on one of your
|
reconstruction. Try running this on one of your previously-generated images.
|
||||||
previously-generated images.
|
|
||||||
|
|
||||||
3. Finally, you can run facial reconstruction and/or upscaling automatically after each Invocation. Go to the Advanced Options section of the Control Panel and turn on _Restore Face_ and/or _Upscale_.

### Image to Image

InvokeAI lets you take an existing image and use it as the basis for a new creation. You can use any sort of image, including a photograph, a scanned sketch, or a digital drawing, as long as it is in PNG or JPEG format.

For this tutorial, we'll use files named [Lincoln-and-Parrot-512.png](../assets/Lincoln-and-Parrot-512.png) and [Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png). Download these images to your local machine now to continue with the walkthrough.

1. Click on the _Image to Image_ tab icon, which is the second icon from the top on the left-hand side of the screen:

   <figure markdown>
   
   {:width="640px"}
   </figure>

2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click the blank area to get an upload dialog. The image will load into an area marked _Initial Image_. (The WebGUI will also load the most recently-generated image from the gallery into a section on the left, but this image will be replaced in the next step.)

|
3. Go to the prompt box and type _old sea captain with raven on shoulder_ and
|
||||||
shoulder* and press Invoke. A derived image will appear to the right
|
press Invoke. A derived image will appear to the right of the original one:
|
||||||
of the original one:
|
|
||||||
|
|
||||||
{:width="640px"}
|
{:width="640px"}
|
||||||
|
|
||||||
4. Experiment with the different settings. The most influential one in Image to Image is _Image to Image Strength_ located about midway down the control panel. By default it is set to 0.75, but can range from 0.0 to 0.99. The higher the value, the more of the original image the AI will replace (a sketch of the underlying arithmetic appears at the end of this walkthrough). A value of 0 will leave the initial image completely unchanged, while 0.99 will replace it completely. However, the Sampler and CFG Scale also influence the final result. You can also generate variations in the same way as described in Text to Image.

5. What if we only want to change certain part(s) of the image and leave the rest intact? This is called Inpainting, and a future version of the InvokeAI web server will provide an interactive painting canvas on which you can directly draw the areas you wish to Inpaint into. For now, you can achieve this effect by using an external photoeditor tool to make one or more regions of the image transparent as described in [INPAINTING.md] and uploading that.

   The file [Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png) is a version of the earlier image in which the area around the parrot has been replaced with transparency. Click on the "x" in the upper right of the Initial Image and upload the transparent version. Using the same prompt "old sea captain with raven on shoulder" try Invoking an image. This time, only the parrot will be replaced, leaving the rest of the original image intact:

   <figure markdown>
   {:width="640px"}
   </figure>

6. Would you like to modify a previously-generated image using the Image to Image facility? Easy! While in the Image to Image panel, hover over any of the gallery images to see a little menu of icons pop up. Click the picture icon to instantly send the selected image to Image to Image as the initial image.

   You can do the same from the Text to Image tab by clicking on the picture icon above the central image panel. The screenshot below shows where the "use as initial image" icons are located.

   {:width="640px"}

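Step 4 above mentions that _Image to Image Strength_ controls how much of the original is replaced. Under the hood, strength effectively decides how far into the noising schedule the initial image is pushed before denoising begins. A rough sketch of that standard arithmetic (illustrative, not InvokeAI's exact code):

```python
def img2img_start_step(strength: float, total_steps: int = 50) -> int:
    """Return the step index where denoising of the init image begins.

    strength=0.0 starts at the very end (nothing is repainted), while a
    strength near 1.0 re-noises almost completely, so the AI replaces
    nearly everything.
    """
    return int(total_steps * (1.0 - strength))

print(img2img_start_step(0.75))  # 12: the remaining 38 of 50 steps repaint
print(img2img_start_step(0.0))   # 50: no steps remain, image unchanged
```
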
## Parting remarks

This concludes the walkthrough, but there are several more features that you can explore. Please check out the [Command Line Interface](CLI.md) documentation for further explanation of the advanced features that were not covered here.

The WebGUI is under rapid development. Check back regularly for updates!

## Reference

### Additional Options

| parameter <img width=160 align="right"> | effect |
| --------------------------------------- | ------ |
| `--web_develop` | Starts the web server in development mode. |
| `--web_verbose` | Enables verbose logging |
| `--cors [CORS ...]` | Additional allowed origins, comma-separated |
| `--host HOST` | Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network. |
| `--port PORT` | Web server: Port to listen on |
| `--gui` | Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask to create a desktop app experience of the webserver. |

### Web Specific Features

The web interface offers an incredibly easy-to-use way to interact with the InvokeAI toolkit. For detailed guidance on individual features, see the Feature-specific help documents available in this directory. Note that the latest functionality available in the CLI may not always be available in the Web interface.

#### Dark Mode & Light Mode

The InvokeAI interface is available in a nano-carbon black & purple Dark Mode, and a "burn your eyes out Nosferatu" Light Mode. These can be toggled by clicking the Sun/Moon icons at the top right of the interface.


|

|
||||||
|
|
||||||
@ -334,7 +325,10 @@ The InvokeAI interface is available in a nano-carbon black & purple Dark Mode, a
|
|||||||
|
|
||||||
#### Invocation Toolbar

The left side of the InvokeAI interface is available for customizing the prompt and the settings used for invoking your new image. Typing your prompt into the open text field and clicking the Invoke button will produce the image based on the settings configured in the toolbar.

See below for additional documentation related to each feature:

#### Invocation Gallery

The currently selected `--outdir` (or the default outputs folder) will display all previously generated files on load. As new invocations are generated, these will be dynamically added to the gallery, and can be previewed by selecting them. Each image also has a simple set of actions (e.g., Delete, Use Seed, Use All Parameters, etc.) that can be accessed by hovering over the image.

#### Image Workspace

When an image from the Invocation Gallery is selected, or is generated, the image will be displayed within the center of the interface. A quickbar of common image interactions is displayed along the top of the image, including:

- Use image in the `Image to Image` workflow
- Initialize Face Restoration on the selected file

## Acknowledgements

A huge shout-out to the core team working to make this vision a reality, including [psychedelicious](https://github.com/psychedelicious), [Kyle0654](https://github.com/Kyle0654) and [blessedcoolant](https://github.com/blessedcoolant). [hipsterusername](https://github.com/hipsterusername) was the team's unofficial cheerleader and added tooltips/docs.

---
title: WebUI Hotkey List
---

# :material-keyboard: **WebUI Hotkey List**

## General

| Setting           | Hotkey                 |
| ----------------- | ---------------------- |
| ++a++             | Set All Parameters     |
| ++s++             | Set Seed               |
| ++u++             | Upscale                |
| ++r++             | Restoration            |
| ++i++             | Show Metadata          |
| ++d++ ++d++ ++l++ | Delete Image           |
| ++alt+a++         | Focus prompt input     |
| ++shift+i++       | Send To Image to Image |
| ++ctrl+enter++    | Start processing       |
| ++shift+x++       | Cancel processing      |
| ++shift+d++       | Toggle Dark Mode       |
| ++"`"++           | Toggle console         |

## Tabs

| Setting | Hotkey                    |
| ------- | ------------------------- |
| ++1++   | Go to Text To Image Tab   |
| ++2++   | Go to Image to Image Tab  |
| ++3++   | Go to Inpainting Tab      |
| ++4++   | Go to Outpainting Tab     |
| ++5++   | Go to Nodes Tab           |
| ++6++   | Go to Post Processing Tab |

## Gallery

| Setting        | Hotkey                          |
| -------------- | ------------------------------- |
| ++g++          | Toggle Gallery                  |
| ++left++       | Go to previous image in gallery |
| ++right++      | Go to next image in gallery     |
| ++shift+p++    | Pin gallery                     |
| ++shift+up++   | Increase gallery image size     |
| ++shift+down++ | Decrease gallery image size     |
| ++shift+r++    | Reset image gallery size        |

## Inpainting
|
## Inpainting
|
||||||
|
|
||||||
| Setting | Hotkey |
|
| Setting | Hotkey |
|
||||||
| -------------------------- | --------------------- |
|
| ---------------------------- | --------------------- |
|
||||||
| [ | Decrease brush size |
|
| ++"["++ | Decrease brush size |
|
||||||
| ] | Increase brush size |
|
| ++"]"++ | Increase brush size |
|
||||||
| alt + [ | Decrease mask opacity |
|
| ++alt+"["++ | Decrease mask opacity |
|
||||||
| alt + ] | Increase mask opacity |
|
| ++alt+"]"++ | Increase mask opacity |
|
||||||
| b | Select brush |
|
| ++b++ | Select brush |
|
||||||
| e | Select eraser |
|
| ++e++ | Select eraser |
|
||||||
| ctrl + z | Undo brush stroke |
|
| ++ctrl+z++ | Undo brush stroke |
|
||||||
| ctrl + shift + z, ctrl + y | Redo brush stroke |
|
| ++ctrl+shift+z++, ++ctrl+y++ | Redo brush stroke |
|
||||||
| h | Hide mask |
|
| ++h++ | Hide mask |
|
||||||
| shift + m | Invert mask |
|
| ++shift+m++ | Invert mask |
|
||||||
| shift + c | Clear mask |
|
| ++shift+c++ | Clear mask |
|
||||||
| shift + j | Expand canvas |
|
| ++shift+j++ | Expand canvas |
|
||||||
|
@ -1,27 +1,25 @@
---
title: F.A.Q.
---

# :material-frequently-asked-questions: F.A.Q.

## **Frequently-Asked-Questions**

Here are a few common installation problems and their solutions. Often these are
caused by incomplete installations or crashes during the install process.

---

### During `conda env create`, conda hangs indefinitely

If it is because of the last PIP step (it usually gets stuck at the Git Clone
step), you can check the detailed log with this method:

```bash
export PIP_LOG="/tmp/pip_log.txt"
touch ${PIP_LOG}
tail -f ${PIP_LOG} &
conda env create -f environment-mac.yaml --debug --verbose
killall tail
rm ${PIP_LOG}
```
@ -29,21 +27,20 @@ rm ${PIP_LOG}
**SOLUTION**

Conda sometimes gets stuck at the last PIP step, in which several git
repositories are cloned and built.

Enter the stable-diffusion directory and completely remove the `src` directory
and all its contents. The safest way to do this is to enter the stable-diffusion
directory and give the command `git clean -f`. If this still doesn't fix the
problem, try `conda clean --all` and then restart at the `conda env create`
step.
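Putting those steps together, a minimal recovery sketch (assuming the Mac
environment file used elsewhere on this page):

```bash
cd stable-diffusion    # enter the checkout
git clean -f           # removes the src directory and other untracked files
conda clean --all      # clears conda's package and tarball caches
conda env create -f environment-mac.yaml
```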
To further understand the problem, check the install log using this method:

```bash
export PIP_LOG="/tmp/pip_log.txt"
touch ${PIP_LOG}
tail -f ${PIP_LOG} &
conda env create -f environment-mac.yaml --debug --verbose
killall tail
rm ${PIP_LOG}
```
@ -51,43 +48,44 @@ rm ${PIP_LOG}
---

### `invoke.py` crashes with the complaint that it can't find `ldm.simplet2i.py`

Or it complains that a function is being passed incorrect parameters.

**SOLUTION**

Reinstall the stable diffusion modules. Enter the `stable-diffusion` directory
and give the command `pip install -e .`
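As a sketch, assuming the checkout lives in a directory named
`stable-diffusion`:

```bash
cd stable-diffusion
pip install -e .    # reinstall the ldm modules in editable mode
```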
---

### Missing modules

`invoke.py` dies, complaining of various missing modules, none of which starts
with `ldm`.

**SOLUTION**

From within the `InvokeAI` directory, run `conda env update`. This is also
frequently the solution to complaints about an unknown function in a module.
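For example (a sketch; the environment file name follows the
`conda env update -f environment.yaml` form used later on this page):

```bash
cd InvokeAI
conda env update -f environment.yaml    # refresh the environment in place
```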
---

### How can I try new features?

There's a feature or bugfix in the Stable Diffusion GitHub that you want to try
out.

**SOLUTIONS**

#### **Main Branch**

If the fix/feature is on the `main` branch, enter the stable-diffusion directory
and do a `git pull`.

Usually this will be sufficient, but if you start to see errors about missing or
incorrect modules, use the command `pip install -e .` and/or
`conda env update -f environment.yaml`. (These commands won't break anything.)
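A typical update sequence might look like this (a sketch, not a guaranteed
recipe):

```bash
cd stable-diffusion
git pull
# Only needed if modules start to go missing or misbehave afterwards:
pip install -e .
conda env update -f environment.yaml
```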
@ -95,33 +93,36 @@ and/or `conda env update` (These commands won't break anything.)
#### **Sub Branch**

If the feature/fix is on a branch (e.g. "_foo-bugfix_"), the recipe is similar,
but do a `git pull <name of branch>`.

#### **Not Committed**

If the feature/fix is in a pull request that has not yet been made part of the
main branch or a feature/bugfix branch, then from the page for the desired pull
request, look for the line at the top that reads "_xxxx wants to merge xx
commits into lstein:main from YYYYYY_". Copy the URL in _YYYYYY_. It should have
the format

`https://github.com/<name of contributor>/stable-diffusion/tree/<name of branch>`

Then **go to the directory above stable-diffusion** and rename the directory to
"_stable-diffusion.lstein_", "_stable-diffusion.old_", or anything else. You can
then git clone the branch that contains the pull request:

`git clone https://github.com/<name of contributor>/stable-diffusion/tree/<name of branch>`

You will need to go through the install procedure again, but it should be fast
because all the dependencies are already loaded.

---

### CUDA out of memory

Image generation crashed with a CUDA out of memory error after successful
sampling.

**SOLUTION**

Try running the script with the option `--free_gpu_mem`. This will free memory
before the image decoding step.
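For example, assuming the usual CLI entry point used elsewhere in these docs:

```bash
python scripts/invoke.py --free_gpu_mem
```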
docs/index.md
@ -14,43 +14,61 @@ title: Home
# ^^**InvokeAI: A Stable Diffusion Toolkit**^^ :tools: <br> <small>Formerly known as lstein/stable-diffusion</small>

[](https://github.com/invoke-ai/InvokeAI)

[![discord badge]][discord link]

[![latest release badge]][latest release link]
[![github stars badge]][github stars link]
[![github forks badge]][github forks link]

[![CI checks on main badge]][ci checks on main link]
[![CI checks on dev badge]][ci checks on dev link]
[![latest commit to dev badge]][latest commit to dev link]

[![github open issues badge]][github open issues link]
[![github open prs badge]][github open prs link]

[ci checks on dev badge]:
  https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[ci checks on dev link]:
  https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[ci checks on main badge]:
  https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[ci checks on main link]:
  https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]:
  https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
[github forks link]:
  https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
[github open issues badge]:
  https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
[github open issues link]:
  https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]:
  https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
[github open prs link]:
  https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]:
  https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]:
  https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]:
  https://github.com/invoke-ai/InvokeAI/commits/development
[latest release badge]:
  https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases

</div>

<a href="https://github.com/invoke-ai/InvokeAI">InvokeAI</a> is an
implementation of Stable Diffusion, the open source text-to-image and
image-to-image generator. It provides a streamlined process with various new
features and options to aid the image generation process. It runs on Windows,
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.

**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
@ -62,8 +80,8 @@ cards with as little as 4 GB or RAM.
## :octicons-package-dependencies-24: Installation

This fork is supported across multiple platforms. You can find individual
installation instructions below.

- :fontawesome-brands-linux: [Linux](installation/INSTALL_LINUX.md)
- :fontawesome-brands-windows: [Windows](installation/INSTALL_WINDOWS.md)
@ -76,6 +94,7 @@ below.
You will need one of the following:

- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
  only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

### :fontawesome-solid-memory: Memory
@ -84,7 +103,8 @@ You wil need one of the following:
### :fontawesome-regular-hard-drive: Disk

- At least 12 GB of free disk space for the machine learning model, Python, and
  all its dependencies.

!!! info
@ -93,8 +113,8 @@ You wil need one of the following:
    Similarly, specify full-precision mode on Apple M1 hardware.

    Precision is auto configured based on the device. If however you encounter
    errors like `expected type Float but found Half` or `not implemented for Half`
    you can try starting `invoke.py` with the `--precision=float32` flag:

    ```bash
@ -103,73 +123,127 @@ You wil need one of the following:
## :octicons-log-16: Latest Changes

### v2.1.0 <small>(2 November 2022)</small>

- [Inpainting](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
  support in the WebGUI
- Greatly improved navigation and user experience in the
  [WebGUI](https://invoke-ai.github.io/InvokeAI/features/WEB/)
- The prompt syntax has been enhanced with
  [prompt weighting, cross-attention and prompt merging](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/).
- You can now load
  [multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
  without leaving the CLI.
- The installation process (via `scripts/preload_models.py`) now lets you select
  among several popular
  [Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
  and downloads and installs them on your behalf. Among other models, this
  script will install the current Stable Diffusion 1.5 model as well as a
  StabilityAI variable autoencoder (VAE) which improves face generation.
- Tired of struggling with photoeditors to get the masked region for inpainting
  just right? Let the AI make the mask for you using
  [text masking](https://docs.google.com/presentation/d/1pWoY510hCVjz0M6X9CBbTznZgW2W5BYNKrmZm7B45q8/edit#slide=id.p).
  This feature allows you to specify the part of the image to paint over using
  just English-language phrases.
- Tired of seeing the head of your subjects cropped off? Uncrop them in the CLI
  with the
  [outcrop feature](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/#outcrop).
- Tired of seeing your subjects' bodies duplicated or mangled when generating
  larger-dimension images? Check out the `--hires` option in the CLI, or select
  the corresponding toggle in the WebGUI.
- We now support textual inversion and fine-tune .bin styles and subjects from
  the Hugging Face archive of
  [SD Concepts](https://huggingface.co/sd-concepts-library). Load the .bin file
  using the `--embedding_path` option. (The next version will support merging
  and loading of multiple simultaneous models).
- ...

### v2.0.1 <small>(13 October 2022)</small>

- fix noisy images at high step count when using k\* samplers
- dream.py script now calls invoke.py module directly rather than via a new
  python process (which could break the environment)

### v2.0.0 <small>(9 October 2022)</small>

- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
  backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for
  <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a>
  and
  <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
- img2img runs on all k\* samplers
- Support for
  <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative
  prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for
  <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing
  of previously-generated images</a> using facial reconstruction, ESRGAN
  upscaling, outcropping (similar to DALL-E infinite canvas), and "embiggen"
  upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows
  <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger
  images to be created without duplicating elements</a>, at the cost of some
  performance.
- New `--perlin` and `--threshold` options allow you to add and control
  variation during image generation (see
  <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding
  and Perlin Noise Initialization</a>)
- Extensive metadata now written into PNG files, allowing reliable regeneration
  of images and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac
  platforms.
- Improved
  <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line
  completion behavior</a>. New commands added:
  - List command-line history with `!history`
  - Search command-line history with `!search`
  - Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
  configure. To switch away from auto use the new flag like
  `--precision=float32`.

For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v114-11-september-2022)**.
## :material-target: Troubleshooting

Please check out our
**[:material-frequently-asked-questions: Q&A](help/TROUBLESHOOT.md)** to get
solutions for common installation problems and other issues.

## :octicons-repo-push-24: Contributing

Anyone who wishes to contribute to this project, whether documentation,
features, bug fixes, code cleanup, testing, or code reviews, is very much
encouraged to do so. If you are unfamiliar with how to contribute to GitHub
projects, here is a
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github).

A full set of contribution guidelines, along with templates, are in progress,
but for now the most important thing is to **make your pull request against the
"development" branch**, and not against "main". This will help keep public
breakage to a minimum and will allow you to propose more radical changes.

## :octicons-person-24: Contributors

This fork is a combined effort of various people from across the world.
[Check out the list of all these amazing people](other/CONTRIBUTORS.md). We
thank them for their time, hard work and effort.

## :octicons-question-24: Support

For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.

Original portions of the software are Copyright (c) 2020
[Lincoln D. Stein](https://github.com/lstein)

## :octicons-book-24: Further Reading

Please see the original README for more information on this software and
underlying algorithm, located in the file
[README-CompViz.md](other/README-CompViz.md).
@ -6,224 +6,223 @@ title: Installing Models
## Model Weight Files

The model weight files ('\*.ckpt') are the Stable Diffusion "secret sauce". They
are the product of training the AI on millions of captioned images gathered from
multiple sources.

Originally there was only a single Stable Diffusion weights file, which many
people named `model.ckpt`. Now there are dozens or more that have been "fine
tuned" to provide particular styles, genres, or other features. InvokeAI allows
you to install and run multiple model weight files and switch between them
quickly in the command-line and web interfaces.

This manual will guide you through installing and configuring model weight
files.

## Base Models

InvokeAI comes with support for a good initial set of models listed in the model
configuration file `configs/models.yaml`. They are:

| Model                | Weight File                       | Description                                           | DOWNLOAD FROM                                                   |
| -------------------- | --------------------------------- | ----------------------------------------------------- | --------------------------------------------------------------- |
| stable-diffusion-1.5 | v1-5-pruned-emaonly.ckpt          | Most recent version of base Stable Diffusion model    | https://huggingface.co/runwayml/stable-diffusion-v1-5           |
| stable-diffusion-1.4 | sd-v1-4.ckpt                      | Previous version of base Stable Diffusion model       | https://huggingface.co/CompVis/stable-diffusion-v-1-4-original  |
| inpainting-1.5       | sd-v1-5-inpainting.ckpt           | Stable Diffusion 1.5 model specialized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting     |
| waifu-diffusion-1.3  | model-epoch09-float32.ckpt        | Stable Diffusion 1.4 trained to produce anime images  | https://huggingface.co/hakurei/waifu-diffusion-v1-3             |
| `<all models>`       | vae-ft-mse-840000-ema-pruned.ckpt | A fine-tune add-on file that improves face generation | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/      |

Note that these files are covered by an "Ethical AI" license which forbids
certain uses. You will need to create an account on the Hugging Face website and
accept the license terms before you can access the files.

The predefined configuration file for InvokeAI (located at
`configs/models.yaml`) provides entries for each of these weights files.
`stable-diffusion-1.5` is the default model used, and we strongly recommend that
you install this weights file if nothing else.
## Community-Contributed Models

There are too many to list here and more are being contributed every day.
Hugging Face maintains a
[fast-growing repository](https://huggingface.co/sd-concepts-library) of
fine-tune (".bin") models that can be imported into InvokeAI by passing the
`--embedding_path` option to the `invoke.py` command.
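For example (a sketch; the embedding path below is a placeholder, not a real
file):

```bash
python scripts/invoke.py --embedding_path models/embeddings/my-concept.bin
```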
[This page](https://rentry.org/sdmodels) hosts a large list of official and
unofficial Stable Diffusion models and where they can be obtained.

## Installation

There are three ways to install weights files:

1. During InvokeAI installation, the `preload_models.py` script can download
   them for you.

2. You can use the command-line interface (CLI) to import, configure and modify
   new models files.

3. You can download the files manually and add the appropriate entries to
   `models.yaml`.

### Installation via `preload_models.py`

This is the most automatic way. Run `scripts/preload_models.py` from the
console. It will ask you to select which models to download and lead you through
the steps of setting up a Hugging Face account if you haven't done so already.

To start, run `python scripts/preload_models.py` from within the InvokeAI
directory:

!!! example ""

    ```text
    Loading Python libraries...

    ** INTRODUCTION **
    Welcome to InvokeAI. This script will help download the Stable Diffusion weight files
    and other large models that are needed for text to image generation. At any point you may interrupt
    this program and resume later.

    ** WEIGHT SELECTION **
    Would you like to download the Stable Diffusion model weights now? [y]

    Choose the weight file(s) you wish to download. Before downloading you
    will be given the option to view and change your selections.

    [1] stable-diffusion-1.5:
        The newest Stable Diffusion version 1.5 weight file (4.27 GB) (recommended)
    Download? [y]
    [2] inpainting-1.5:
        RunwayML SD 1.5 model optimized for inpainting (4.27 GB) (recommended)
    Download? [y]
    [3] stable-diffusion-1.4:
        The original Stable Diffusion version 1.4 weight file (4.27 GB)
    Download? [n] n
    [4] waifu-diffusion-1.3:
        Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
    Download? [n] y
    [5] ft-mse-improved-autoencoder-840000:
        StabilityAI improved autoencoder fine-tuned for human faces (335 MB) (recommended)
    Download? [y] y
    The following weight files will be downloaded:
       [1] stable-diffusion-1.5*
       [2] inpainting-1.5
       [4] waifu-diffusion-1.3
       [5] ft-mse-improved-autoencoder-840000
    *default
    Ok to download? [y]
    ** LICENSE AGREEMENT FOR WEIGHT FILES **

    1. To download the Stable Diffusion weight files you need to read and accept the
       CreativeML Responsible AI license. If you have not already done so, please
       create an account using the "Sign Up" button:

       https://huggingface.co

       You will need to verify your email address as part of the HuggingFace
       registration process.

    2. After creating the account, login under your account and accept
       the license terms located here:

       https://huggingface.co/CompVis/stable-diffusion-v-1-4-original

    Press <enter> when you are ready to continue:
    ...
    ```

When the script is complete, you will find the downloaded weights files in
`models/ldm/stable-diffusion-v1` and a matching configuration file in
`configs/models.yaml`.

You can run the script again to add any models you didn't select the first time.
Note that as a safety measure the script will _never_ remove a
previously-installed weights file. You will have to do this manually.
### Installation via the CLI

You can install a new model, including any of the community-supported ones, via
the command-line client's `!import_model` command.

1. First download the desired model weights file and place it under
   `models/ldm/stable-diffusion-v1/`. You may rename the weights file to
   something more memorable if you wish. Record the path of the weights file
   (e.g. `models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`)

2. Launch the `invoke.py` CLI with `python scripts/invoke.py`.

3. At the `invoke>` command-line, enter the command
   `!import_model <path to model>`. For example:

   `invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`

   !!! tip "the CLI supports file path autocompletion"

       Type a bit of the path name and hit ++tab++ in order to get a choice of
       possible completions.

4. Follow the wizard's instructions to complete installation as shown in the
   example here:

   !!! example ""

       ```text
       invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
       >> Model import in process. Please enter the values needed to configure this model:

       Name for this model: arabian-nights
       Description of this model: Arabian Nights Fine Tune v1.0
       Configuration file for this model: configs/stable-diffusion/v1-inference.yaml
       Default image width: 512
       Default image height: 512
       >> New configuration:
       arabian-nights:
         config: configs/stable-diffusion/v1-inference.yaml
         description: Arabian Nights Fine Tune v1.0
         height: 512
         weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
         width: 512
       OK to import [n]? y
       >> Caching model stable-diffusion-1.4 in system RAM
       >> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
          | LatentDiffusion: Running in eps-prediction mode
          | DiffusionWrapper has 859.52 M params.
          | Making attention of type 'vanilla' with 512 in_channels
          | Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
          | Making attention of type 'vanilla' with 512 in_channels
          | Using faster float16 precision
       ```

If you've previously installed the fine-tune VAE file
`vae-ft-mse-840000-ema-pruned.ckpt`, the wizard will also ask you if you want to
add this VAE to the model.

The appropriate entry for this model will be added to `configs/models.yaml` and
it will be available to use in the CLI immediately.

The CLI has additional commands for switching among, viewing, editing, and
deleting the available models. These are described in
[Command Line Client](../features/CLI.md#model-selection-and-importation), but
the two most frequently-used are `!models` and `!switch <name of model>`. The
first prints a table of models that InvokeAI knows about and their load status.
The second will load the requested model and lets you switch back and forth
quickly among loaded models.
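For example (a sketch; the model name comes from the base-model table above):

```bash
python scripts/invoke.py
# Then, at the invoke> prompt:
#   invoke> !models                      # table of known models and load status
#   invoke> !switch waifu-diffusion-1.3  # load and switch to another model
```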
### Manually editing of `configs/models.yaml`

If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.

First you need to download the desired .ckpt file and place it in
`models/ldm/stable-diffusion-v1` as described in step #1 in the previous
section. Record the path to the weights file, e.g.
`models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`

Then using a **text** editor (e.g. the Windows Notepad application), open the
file `configs/models.yaml`, and add a new stanza that follows this example:

```yaml
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
@ -234,34 +233,14 @@ arabian-nights-1.0:
  default: false
```

| name               | description |
| :----------------- | :---------- |
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description        | Any description that you want to add to the model to remind you what it is. |
| weights            | Relative path to the .ckpt weights file for this model. |
| config             | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `preload_models.py` script. |
| vae                | If you want to add a VAE file to the model, then enter its path here. |
| width, height      | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |

Save the `models.yaml` and relaunch InvokeAI. The new model should now be
available for your use.
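For instance, a stanza that also sets the optional `vae` field could be appended
like this (a sketch; the model name and paths are illustrative, and the VAE file
name comes from the base-model table above):

```bash
# Append a new stanza to configs/models.yaml (back the file up first).
cat >> configs/models.yaml <<'EOF'
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  width: 512
  height: 512
  default: false
EOF
```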
@ -4,26 +4,30 @@ title: Docker
|
|||||||
|
|
||||||
# :fontawesome-brands-docker: Docker
|
# :fontawesome-brands-docker: Docker
|
||||||
|
|
||||||
!!! warning "For end users"

    We highly recommend installing InvokeAI locally using [these instructions](index.md).

!!! tip "For developers"

    For container-related development tasks or for enabling easy
    deployment to other environments (on-premises or cloud), follow these
    instructions.

    For general use, install locally to leverage your machine's GPU.
## Why containers?

They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
use a Docker volume to store the largest model files and image outputs as a
first step in decoupling storage and compute. Future enhancements can do this
for other assets. See [Processes](https://12factor.net/processes) under the
Twelve-Factor App methodology for details on why running applications in such a
stateless fashion is important.

You can specify the target platform when building the image and running the
container. You'll also need to specify the InvokeAI requirements file that
matches the container's OS and the architecture it will run on.

Developers on Apple silicon (M1/M2): You
[can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224)
@ -38,16 +42,19 @@ another environment with NVIDIA GPUs on-premises or in the cloud.

#### Install [Docker](https://github.com/santisbon/guides#docker)

On the [Docker Desktop app](https://docs.docker.com/get-docker/), go to
Preferences, Resources, Advanced. Increase the CPUs and Memory to avoid this
[Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to
increase Swap and Disk image size too.
#### Get a Huggingface-Token

Besides the Docker Agent you will need an account on
[huggingface.co](https://huggingface.co/join).

After you successfully registered your account, go to
[huggingface.co/settings/tokens](https://huggingface.co/settings/tokens), create
a token and copy it, since you will need it for the next step.

### Setup
@ -65,13 +72,14 @@ created in the last step.

Some suggestions of variables you may want to change besides the token:

| Environment-Variable      | Default value                 | Description                                                                      |
| ------------------------- | ----------------------------- | -------------------------------------------------------------------------------- |
| `HUGGINGFACE_TOKEN`       | No default, but **required**! | This is the only **required** variable; without it you can't get the checkpoint |
| `ARCH`                    | x86_64                        | set this if you are using an ARM-based CPU                                      |
| `INVOKEAI_TAG`            | invokeai-x86_64               | the container repository / tag which will be used                               |
| `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml      | override this, since `environment.yml` wouldn't work with aarch64               |
| `INVOKEAI_GIT`            | invoke-ai/InvokeAI            | the repository to use                                                           |
| `INVOKEAI_BRANCH`         | main                          | the branch to checkout                                                          |
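For example, the settings for an Apple-silicon machine building from a personal fork might look like the sketch below. Every value is illustrative, and whether you export these in your shell or place them in the file created during the Setup step depends on your workflow:

```bash
# Illustrative values only -- substitute your own token, fork, and branch.
export HUGGINGFACE_TOKEN="hf_your_token_here"                 # required
export ARCH="aarch64"                                         # ARM-based CPU
export INVOKEAI_CONDA_ENV_FILE="environment-lin-aarch64.yml"  # aarch64 environment
export INVOKEAI_GIT="yourname/InvokeAI"                       # hypothetical fork
export INVOKEAI_BRANCH="my-feature-branch"                    # hypothetical branch
```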
#### Build the Image

@ -79,25 +87,41 @@ I provided a build script, which is located in `docker-build/build.sh` but still
needs to be executed from the Repository root.

```bash
./docker-build/build.sh
```

The build script not only builds the container, but also creates the docker
volume if it does not exist yet; if the volume is empty, it will just download
the models.
#### Run the Container

After the build process is done, you can run the container via the provided
`docker-build/run.sh` script

```bash
./docker-build/run.sh
```
When used without arguments, the container will start the webserver and provide
you the link to open it. But if you want to use some other parameters, you can
do so as well.

!!! example ""

    ```bash
    ./docker-build/run.sh --from_file tests/validate_pr_prompt.txt
    ```

    The output folder is located on the volume which is also used to store the model.

Find out more about available CLI-parameters at [features/CLI.md](../features/CLI.md/#arguments)

---
!!! warning "Deprecated"

    From here on you will find the previous Docker docs, which may still
    provide useful information here and there.

## Usage (time to have fun)
docs/installation/INSTALL_INVOKE.md (new file, 55 lines)
@ -0,0 +1,55 @@

---
title: InvokeAI Installer
---

The InvokeAI installer is a shell script that will install InvokeAI onto a stock
computer running recent versions of Linux, MacOSX or Windows. It will leave you
with a version that runs a stable version of InvokeAI. When a new version of
InvokeAI is released, you will download and reinstall the new version.

If you wish to tinker with unreleased versions of InvokeAI that introduce
potentially unstable new features, you should consider using the
[source installer](INSTALL_SOURCE.md) or one of the
[manual install](INSTALL_MANUAL.md) methods.

!!! todo

    Before you begin, make sure that you meet the
    [hardware requirements](/#hardware-requirements) and have the
    appropriate GPU drivers installed. In particular, if you are a Linux user
    with an AMD GPU installed, you may need to install the
    [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

## Steps to Install

1. Download the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
   InvokeAI's installer for your platform.

2. Place the downloaded package someplace where you have plenty of HDD space,
   and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows).

3. Extract the 'InvokeAI' folder from the downloaded package.

4. Open the extracted 'InvokeAI' folder.

5. Double-click 'install.bat' (Windows), or 'install.sh' (Lin/Mac), or run it
   from a terminal as sketched below.

6. Follow the prompts.

7. After installation, please run the 'invoke.bat' file (on Windows) or
   'invoke.sh' file (on Linux/Mac) to start InvokeAI.
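A minimal sketch of steps 2-7 from a Linux/Mac terminal; the archive name here is illustrative, so substitute the file you actually downloaded:

```bash
# Hypothetical archive name -- use the installer you downloaded in step 1.
cd ~
unzip invokeAI-installer-linux.zip
cd InvokeAI
./install.sh   # runs the installer; follow the prompts
./invoke.sh    # start InvokeAI once installation completes
```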
## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.
docs/installation/INSTALL_JUPYTER.md (new file, 27 lines)
@ -0,0 +1,27 @@

---
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

# THIS NEEDS TO BE FLESHED OUT

## Introduction

We have a [Jupyter notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
with cell-by-cell installation steps. It will download the code in this repo as
one of the steps, so instead of cloning this repo, simply download the notebook
from the link above and load it up in VSCode (with the appropriate extensions
installed)/Jupyter/JupyterLab and start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

## Walkthrough

## Updating to newer versions

### Updating the stable version

### Updating to the development version

## Troubleshooting
docs/installation/INSTALL_MANUAL.md (new file, 416 lines)
@ -0,0 +1,416 @@

---
title: Manual Installation
---

<figure markdown>

# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows

</figure>

!!! warning "This is for advanced users"

    who are already experienced with using conda or pip

## Introduction

You have two choices for manual installation: the [first one](#Conda_method),
based on the Anaconda3 package manager (`conda`), and
[a second one](#PIP_method), which uses basic Python virtual environment (`venv`)
commands and the PIP package manager. Both methods require you to enter commands
on the terminal, also known as the "console".

On Windows systems you are encouraged to install and use the
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice features such as
command-line completion.
### Conda method

1. Check that your system meets the
   [hardware requirements](index.md#Hardware_Requirements) and has the
   appropriate GPU drivers installed. In particular, if you are a Linux user
   with an AMD GPU installed, you may need to install the
   [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

    InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
    of ROCm driver support on this platform.

    To confirm that the appropriate drivers are installed, run `nvidia-smi` on
    NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
    information about the installed video card; a quick check is sketched at the
    end of this step.

    Macintosh users with MPS acceleration, or anybody with a CPU-only system,
    can skip this step.
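    A minimal sketch of this check (the exact output varies by card and driver):

    ```bash
    # NVIDIA/CUDA systems: prints driver version, CUDA version, and GPU name
    nvidia-smi
    # AMD systems with the ROCm driver installed
    rocm-smi
    ```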
2. You will need to install Anaconda3 and Git if they are not already
   available. Use your operating system's preferred package manager, or
   download the installers manually. You can find them here:

    - [Anaconda3](https://www.anaconda.com/)
    - [git](https://git-scm.com/downloads)

3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
   GitHub:

    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```

    This will create an InvokeAI folder where you will follow the rest of the
    steps.

4. Enter the newly-created InvokeAI folder:

    ```bash
    cd InvokeAI
    ```

    From this step forward make sure that you are working in the InvokeAI
    directory!
5. Select the appropriate environment file:

    We have created a series of environment files suited for different operating
    systems and GPU hardware. They are located in the
    `environments-and-requirements` directory:

    <figure markdown>

    | filename                 | OS                              |
    | :----------------------: | :-----------------------------: |
    | environment-lin-amd.yml  | Linux with an AMD (ROCm) GPU    |
    | environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU   |
    | environment-mac.yml      | Macintosh                       |
    | environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |

    </figure>

    Choose the appropriate environment file for your system and link or copy it
    to `environment.yml` in InvokeAI's top-level directory. To do so, run the
    following command from the repository root:

    !!! Example ""

        === "Macintosh and Linux"

            !!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"

            ```bash
            ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
            ```

            When this is done, confirm that a file `environment.yml` has been linked in
            the InvokeAI root directory and that it points to the correct file in
            `environments-and-requirements`.

            ```bash
            ls -la
            ```

        === "Windows"

            !!! todo "Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"

            ```cmd
            copy environments-and-requirements\environment-win-cuda.yml environment.yml
            ```

            Afterwards verify that the file `environment.yml` has been created, either via the
            explorer or by using the command `dir` from the terminal

            ```cmd
            dir
            ```

    !!! warning "Do not try to run conda directly on the environments file in the subdirectory. This won't work. Instead, copy or link it to the top-level directory as shown."
6. Create the conda environment:

    ```bash
    conda env update
    ```

    This will create a new environment named `invokeai` and install all InvokeAI
    dependencies into it. If something goes wrong you should take a look at
    [troubleshooting](#troubleshooting).

7. Activate the `invokeai` environment:

    In order to use the newly created environment you will first need to
    activate it

    ```bash
    conda activate invokeai
    ```

    Your command-line prompt should change to indicate that `invokeai` is active
    by prepending `(invokeai)`; a quick way to confirm is sketched below.
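    One way to confirm, assuming conda is on your PATH, is to list the
    environments and check that `invokeai` exists and is currently active:

    ```bash
    conda env list   # the active environment is marked with an asterisk
    ```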
8. Pre-load the model weights files:

    !!! tip

        If you have already downloaded the weights file(s) for another Stable
        Diffusion distribution, you may skip this step (by selecting "skip" when
        prompted) and configure InvokeAI to use the previously-downloaded files. The
        process for this is described [here](INSTALLING_MODELS.md).

    ```bash
    python scripts/preload_models.py
    ```

    The script `preload_models.py` will interactively guide you through the
    process of downloading and installing the weights files needed for InvokeAI.
    Note that the main Stable Diffusion weights file is protected by a license
    agreement that you have to agree to. The script will list the steps you need
    to take to create an account on the site that hosts the weights files,
    accept the agreement, and provide an access token that allows InvokeAI to
    legally download and install the weights files.

    If you get an error message about a module not being installed, check that
    the `invokeai` environment is active and if not, repeat step 7.
9. Run the command-line or the web interface:

    !!! example ""

        !!! warning "Make sure that the conda environment is activated, which should place `(invokeai)` in front of your prompt!"

        === "CLI"

            ```bash
            python scripts/invoke.py
            ```

        === "local Webserver"

            ```bash
            python scripts/invoke.py --web
            ```

        === "Public Webserver"

            ```bash
            python scripts/invoke.py --web --host 0.0.0.0
            ```

        If you choose to run the web interface, point your browser at
        http://localhost:9090 in order to load the GUI.

10. Render away!

    Browse the [features](../features/CLI.md) section to learn about all the things you
    can do with InvokeAI.

    Note that some GPUs are slow to warm up. In particular, when using an AMD
    card with the ROCm driver, you may have to wait for over a minute the first
    time you try to generate an image. Fortunately, after the warm-up period
    rendering will be fast.

11. Subsequently, to relaunch the script, be sure to run `conda activate
    invokeai`, enter the `InvokeAI` directory, and then launch the invoke
    script. If you forget to activate the `invokeai` environment, the script
    will fail with multiple `ModuleNotFound` errors.
## Updating to newer versions of the script

This distribution is changing rapidly. If you used the `git clone` method
(step 3) to download the InvokeAI directory, then to update to the latest and
greatest version, launch the Anaconda window, enter `InvokeAI` and type:

```bash
git pull
conda env update
python scripts/preload_models.py --no-interactive #optional
```

This will bring your local copy into sync with the remote one. The last step may
be needed to take advantage of new features or released models. The
`--no-interactive` flag will prevent the script from prompting you to download
the big Stable Diffusion weights files.
## pip Install

To install InvokeAI with only the PIP package manager, please follow these
steps:

1. Make sure you are using Python 3.9 or higher. The rest of the install
   procedure depends on this:

    ```bash
    python -V
    ```

2. Install the `virtualenv` tool if you don't have it already:

    ```bash
    pip install virtualenv
    ```

3. From within the InvokeAI top-level directory, create and activate a virtual
   environment named `invokeai`:

    ```bash
    virtualenv invokeai
    source invokeai/bin/activate
    ```
4. Pick the correct `requirements*.txt` file for your hardware and operating
   system.

    We have created a series of requirements files suited for different operating
    systems and GPU hardware. They are located in the
    `environments-and-requirements` directory:

    <figure markdown>

    | filename                            | OS                                                               |
    | :---------------------------------: | :--------------------------------------------------------------: |
    | requirements-lin-amd.txt            | Linux with an AMD (ROCm) GPU                                     |
    | requirements-lin-arm64.txt          | Linux running on arm64 systems                                   |
    | requirements-lin-cuda.txt           | Linux with an NVIDIA (CUDA) GPU                                  |
    | requirements-mac-mps-cpu.txt        | Macintoshes with MPS acceleration                                |
    | requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |

    </figure>

    Select the appropriate requirements file, and make a link to it from
    `requirements.txt` in the top-level InvokeAI directory. The command to do
    this from the top-level directory is:

    !!! example ""

        === "Macintosh and Linux"

            !!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."

            ```bash
            ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
            ```

        === "Windows"

            !!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"

            ```cmd
            copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
            ```

    !!! warning

        Please do not link or copy `environments-and-requirements/requirements-base.txt`.
        This is a base requirements file that does not have the platform-specific
        libraries. Also, be sure to link or copy the platform-specific file to
        a top-level file named `requirements.txt` as shown here. Running pip on
        a requirements file in a subdirectory will not work as expected.

    When this is done, confirm that a file named `requirements.txt` has been
    created in the InvokeAI root directory and that it points to the correct
    file in `environments-and-requirements`.
5. Run PIP

    Be sure that the `invokeai` environment is active before doing this:

    ```bash
    pip install --prefer-binary -r requirements.txt
    ```
---

## Troubleshooting

Here are some common issues and their suggested solutions.

### Conda

#### Conda fails before completing `conda update`

The usual source of these errors is a package incompatibility. While we have
tried to minimize these, over time packages get updated and sometimes introduce
incompatibilities.

We suggest that you search
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).

You may also try to install the broken packages manually using PIP. To do this,
activate the `invokeai` environment, and run `pip install` with the name and
version of the package that is causing the incompatibility. For example:
```bash
pip install test-tube==0.7.5
```

You can keep doing this until all requirements are satisfied and the `invoke.py`
script runs without errors. Please report to
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
to work around the problem so that others can benefit from your investigation.
#### `preload_models.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.

If the problem persists, a more extreme measure is to clear Conda's caches and
remove the `invokeai` environment:

```bash
conda deactivate
conda env remove -n invokeai
conda clean -a
conda update
```

This removes all cached library files, including ones that may have been
corrupted somehow. (This is not supposed to happen, but does anyway.)
#### `invoke.py` crashes at a later stage

If the CLI or web site had been working ok, but something unexpected happens
later on during the session, you've encountered a code bug that is probably
unrelated to an install issue. Please search
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).
#### My renders are running very slowly

You may have installed the wrong torch (machine learning) package, and the
system is running on CPU rather than the GPU. To check, look at the log messages
that appear when `invoke.py` is first starting up. One of the earlier lines
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
and on Macintoshes, it should say "mps". If instead the message says it is
running on "cpu", then you may need to install the correct torch library. A
quick way to check which device torch sees is sketched below.
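A minimal check from inside the active environment (this assumes torch is already installed there):

```bash
# Should print True on a working CUDA (or ROCm) install; False means torch will run on the CPU
python -c "import torch; print(torch.cuda.is_available())"
```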
You may be able to fix this by installing a different torch library. Here are
the magic incantations for Conda and PIP.

!!! todo "For CUDA systems"

    - conda

    ```bash
    conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
    ```

    - pip

    ```bash
    pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
    ```

!!! todo "For AMD systems"

    - conda

    ```bash
    conda activate invokeai
    pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
    ```

    - pip

    ```bash
    pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
    ```

More information and troubleshooting tips can be found at https://pytorch.org.
docs/installation/INSTALL_PCP.md (new file, 17 lines)
@ -0,0 +1,17 @@

---
title: Installing InvokeAI with the Pre-Compiled PIP Installer
---

# THIS NEEDS TO BE FLESHED OUT

## Introduction

## Walkthrough

## Updating to newer versions

### Updating the stable version

### Updating to the development version

## Troubleshooting
docs/installation/INSTALL_SOURCE.md (new file, 156 lines)
@ -0,0 +1,156 @@

---
title: Source Installer
---

# The InvokeAI Source Installer

## Introduction

The source installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md).

Before you begin, make sure that you meet the
[hardware requirements](index.md#Hardware_Requirements) and have the appropriate
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.
## Walk through

Though there are multiple steps, there really is only one click involved to kick
off the process.

1. The source installer is distributed in ZIP files. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

    - invokeAI-src-installer-mac.zip
    - invokeAI-src-installer-windows.zip
    - invokeAI-src-installer-linux.zip

    Download the one that is appropriate for your operating system.

2. Unpack the zip file into a directory that has at least 18G of free space. Do
   _not_ unpack into a directory that has an earlier version of InvokeAI.

    This will create a new directory named "InvokeAI". This example shows how
    this would look using the `unzip` command-line tool, but you may use any
    graphical or command-line Zip extractor:

    ```cmd
    C:\Documents\Linco> unzip invokeAI-windows.zip
    Archive:  C:\Linco\Downloads\invokeAI-windows.zip
      creating: invokeAI\
     inflating: invokeAI\install.bat
     inflating: invokeAI\readme.txt
    ```

3. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

4. Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd invokeAI
    C:\Documents\Linco\invokeAI> install.bat
    ```
5. Sit back and let the install script work. It will install various binary
   requirements including Conda, Git and Python, then download the current
   InvokeAI code and install it along with its dependencies.

6. After installation completes, the installer will launch a script called
   `preload_models.py`, which will guide you through the first-time process of
   selecting one or more Stable Diffusion model weights files, downloading and
   configuring them.

    Note that the main Stable Diffusion weights file is protected by a license
    agreement that you must agree to in order to use. The script will list the
    steps you need to take to create an account on the official site that hosts
    the weights files, accept the agreement, and provide an access token that
    allows InvokeAI to legally download and install the weights files.

    If you have already downloaded the weights file(s) for another Stable
    Diffusion distribution, you may skip this step (by selecting "skip" when
    prompted) and configure InvokeAI to use the previously-downloaded files. The
    process for this is described in [Installing Models](INSTALLING_MODELS.md).
7. The script will now exit and you'll be ready to generate some images. The
   invokeAI directory will contain numerous files. Look for a shell script
   named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
   by double-clicking it or typing its name at the command-line:

    ```cmd
    C:\Documents\Linco> cd invokeAI
    C:\Documents\Linco\invokeAI> invoke.bat
    ```

The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
the command-line interface, or (2) the web GUI. If you start the latter, you can
load the user interface by pointing your browser at http://localhost:9090.

The `invoke` script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a command-line
interface in which you can run python commands directly, access developer tools,
and launch InvokeAI with customized options. To do the latter, you would launch
the script `scripts/invoke.py` as shown in this example:

```cmd
python scripts/invoke.py --web --max_load_models=3 \
    --model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
```

These options are described in detail in the
[Command-Line Interface](../features/CLI.md) documentation.
## Updating to newer versions

This section describes how to update InvokeAI to new versions of the software.

### Updating the stable version

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `preload_models` script to download any updated models
files that may be needed. You can also use this to add additional models that
you did not select at installation time.
### Updating to the development version

There may be times that there is a feature in the `development` branch of
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
corrects an annoying bug. To do this, you will use the developer's console.

From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
`invoke.bat` (Windows) and select option (3) to open the developer's console.
Then run the following commands to get the `development` branch:

```bash
git checkout development
git pull
conda env update
```

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `preload_models.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/preload_models.py`.
## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.
docs/installation/index.md (new file, 59 lines)
@ -0,0 +1,59 @@

---
title: Overview
---

We offer several ways to install InvokeAI, each one suited to your
experience and preferences.

1. [InvokeAI installer](INSTALL_INVOKE.md)

    This is an installer script that installs InvokeAI and all the
    third party libraries it depends on. When a new version of
    InvokeAI is released, you will download and reinstall the new
    version.

    This installer is designed for people who want the system to "just
    work", don't have an interest in tinkering with it, and do not
    care about upgrading to unreleased experimental features.

    Note that this script has difficulty on some Macintosh machines
    that have previously been used for Python development due to
    conflicting development tools versions. Mac developers may wish
    to try method (2) or one of the manual methods instead.

2. [Source code installer](INSTALL_SOURCE.md)

    This is a script that will install InvokeAI and all its essential
    third party libraries. In contrast to the previous installer, it
    includes access to a "developer console" which will allow you to
    access experimental features on the development branch.

    This method is recommended for individuals who wish to stay
    on the cutting edge of InvokeAI development and are not afraid
    of occasional breakage.

3. [Manual Installation](INSTALL_MANUAL.md)

    In this method you will manually run the commands needed to install
    InvokeAI and its dependencies. We offer two recipes: one suited to
    those who prefer the `conda` tool, and one suited to those who prefer
    `pip` and Python virtual environments.

    This method is recommended for users who have used `conda`
    or `pip` in the past, developers, and anyone who wishes to remain on
    the cutting edge of future InvokeAI development and is willing to put
    up with occasional glitches and breakage.

4. [Docker Installation](INSTALL_DOCKER.md)

    We also offer a method for creating Docker containers containing
    InvokeAI and its dependencies. This method is recommended for
    individuals with experience with Docker containers and who understand
    the pluses and minuses of a container-based install.

5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)

    This method is suitable for running InvokeAI on a Google Colab
    account. It is recommended for individuals who have previously
    worked on the Colab and are comfortable with the Jupyter notebook
    environment.
@ -42,14 +42,25 @@ title: Manual Installation, Linux

    ```

5. Use anaconda to copy necessary python packages, create a new python
   environment named `invokeai` and then activate the environment.

    !!! todo "For systems with a CUDA (Nvidia) card:"

        ```bash
        (base) rm -rf src      # (this is a precaution in case there is already a src directory)
        (base) ~/InvokeAI$ conda env create -f environment-cuda.yml
        (base) ~/InvokeAI$ conda activate invokeai
        (invokeai) ~/InvokeAI$
        ```

    !!! todo "For systems with an AMD card (using ROCm driver):"

        ```bash
        (base) rm -rf src      # (this is a precaution in case there is already a src directory)
        (base) ~/InvokeAI$ conda env create -f environment-AMD.yml
        (base) ~/InvokeAI$ conda activate invokeai
        (invokeai) ~/InvokeAI$
        ```

    After these steps, your command prompt will be prefixed by `(invokeai)` as
    shown above.
@ -13,22 +13,9 @@ one of the steps, so instead of cloning this repo, simply download the notebook
from the link above and load it up in VSCode (with the appropriate extensions
installed)/Jupyter/JupyterLab and start running the cells one-by-one.

Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand.

## **Manual Install with Conda**

1. Install Anaconda3 (miniconda3 version) from [here](https://docs.anaconda.com/anaconda/install/windows/)
@ -52,23 +39,29 @@ in the wiki

    cd InvokeAI
    ```

6. Run the following commands:

    !!! todo "For systems with a CUDA (Nvidia) card:"

        ```bash
        rmdir src      # (this is a precaution in case there is already a src directory)
        conda env create -f environment-cuda.yml
        conda activate invokeai
        (invokeai)>
        ```

    !!! todo "For systems with an AMD card (using ROCm driver):"

        ```bash
        rmdir src      # (this is a precaution in case there is already a src directory)
        conda env create -f environment-AMD.yml
        conda activate invokeai
        (invokeai)>
        ```

    This will install all python requirements and activate the "invokeai" environment
    which sets PATH and other environment variables properly.

7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:
@ -9,11 +9,11 @@ experience in this fork.

We thank them for all of their time and hard work.

## **Original Author**

- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)

## **Contributions by**

- [Sean McLellan](https://github.com/Oceanswave)
- [Kevin Gibbons](https://github.com/bakkot)

@ -62,7 +62,7 @@ We thank them for all of their time and hard work.

- [psychedelicious](https://github.com/psychedelicious)
- [damian0815](https://github.com/damian0815)

## **Original CompVis Authors**

- [Robin Rombach](https://github.com/rromb)
- [Patrick von Platen](https://github.com/patrickvonplaten)
@ -1,65 +0,0 @@
name: invokeai
channels:
  - pytorch
  - conda-forge
dependencies:
  - python=3.9.13
  - pip=22.2.2

  - pytorch=1.12.1
  - torchvision=0.13.1

  # I suggest to keep the other deps sorted for convenience.
  # To determine what the latest versions should be, run:
  #
  # ```shell
  # sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml
  # CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} '
  # ```

  - albumentations=1.2.1
  - coloredlogs=15.0.1
  - diffusers=0.6.0
  - einops=0.4.1
  - grpcio=1.46.4
  - humanfriendly=10.0
  - imageio=2.21.2
  - imageio-ffmpeg=0.4.7
  - imgaug=0.4.0
  - kornia=0.6.7
  - mpmath=1.2.1
  - nomkl # arm64 has only 1.0 while x64 needs 3.0
  - numpy=1.23.4
  - omegaconf=2.1.1
  - openh264=2.3.0
  - onnx=1.12.0
  - onnxruntime=1.12.1
  - pudb=2022.1
  - pytorch-lightning=1.7.7
  - scipy=1.9.3
  - streamlit=1.12.2
  - sympy=1.10.1
  - tensorboard=2.10.0
  - torchmetrics=0.10.1
  - py-opencv=4.6.0
  - flask=2.1.3
  - flask-socketio=5.3.0
  - flask-cors=3.0.10
  - eventlet=0.33.1
  - protobuf=3.20.1
  - send2trash=1.8.0
  - transformers=4.23.1
  - torch-fidelity=0.3.0
  - pip:
    - getpass_asterisk
    - dependency_injector==4.40.0
    - realesrgan==0.2.5.0
    - test-tube==0.7.5
    - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
    - -e git+https://github.com/openai/CLIP.git@main#egg=clip
    - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
    - -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
    - -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
    - -e .
variables:
  PYTORCH_ENABLE_MPS_FALLBACK: 1
@ -3,43 +3,43 @@ channels:
  - pytorch
  - conda-forge
dependencies:
  - albumentations=0.4.3
  - cudatoolkit
  - einops=0.3.0
  - eventlet
  - flask-socketio=5.3.0
  - flask=2.1.*
  - flask_cors=3.0.10
  - imageio-ffmpeg=0.4.2
  - imageio=2.9.0
  - kornia=0.6
  - numpy=1.19
  - opencv=4.6.0
  - pillow=8.*
  - pip>=22.2.2
  - pudb=2019.2
  - python=3.9.*
  - pytorch
  - pytorch-lightning=1.7.7
  - send2trash=1.8.0
  - streamlit
  - tokenizers>=0.11.1,!=0.11.3,<0.13
  - torch-fidelity=0.3.0
  - torchmetrics=0.7.0
  - torchvision
  - transformers=4.21.3
  - pip:
    - dependency_injector==4.40.0
    - getpass_asterisk
    - gfpgan
    - omegaconf==2.1.1
    - pyreadline3
    - realesrgan
    - taming-transformers-rom1504
    - test-tube>=0.7.5
    - git+https://github.com/openai/CLIP.git@main#egg=clip
    - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
    - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
    - -e .
variables:
  PYTORCH_ENABLE_MPS_FALLBACK: 1
environments-and-requirements/environment-lin-amd.yml (new file, 45 lines)
@ -0,0 +1,45 @@
name: invokeai
channels:
  - pytorch
  - conda-forge
  - defaults
dependencies:
  - python>=3.9
  - pip=22.2.2
  - numpy=1.23.3
  - pip:
    - --extra-index-url https://download.pytorch.org/whl/rocm5.2/
    - albumentations==0.4.3
    - dependency_injector==4.40.0
    - diffusers==0.6.0
    - einops==0.3.0
    - eventlet
    - flask==2.1.3
    - flask_cors==3.0.10
    - flask_socketio==5.3.0
    - getpass_asterisk
    - gfpgan
    - imageio-ffmpeg==0.4.2
    - imageio==2.9.0
    - kornia==0.6.0
    - omegaconf==2.2.3
    - opencv-python==4.5.5.64
    - pillow==9.2.0
    - pudb==2019.2
    - pyreadline3
    - pytorch-lightning==1.7.7
    - realesrgan
    - send2trash==1.8.0
    - streamlit==1.12.0
    - taming-transformers-rom1504
    - test-tube>=0.7.5
    - torch
    - torch-fidelity==0.3.0
    - torchaudio
    - torchmetrics==0.7.0
    - torchvision
    - transformers==4.21.3
    - git+https://github.com/openai/CLIP.git@main#egg=clip
    - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
    - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
    - -e .
@ -13,33 +13,33 @@ dependencies:
  - cudatoolkit=11.6
  - pip:
    - albumentations==0.4.3
    - dependency_injector==4.40.0
    - diffusers==0.6.0
    - einops==0.3.0
    - eventlet
    - flask==2.1.3
    - flask_cors==3.0.10
    - flask_socketio==5.3.0
    - getpass_asterisk
    - gfpgan
    - imageio-ffmpeg==0.4.2
    - imageio==2.9.0
    - kornia==0.6.0
    - omegaconf==2.2.3
    - opencv-python==4.5.5.64
    - pillow==9.2.0
    - pudb==2019.2
    - pyreadline3
    - pytorch-lightning==1.7.7
    - realesrgan
    - send2trash==1.8.0
    - streamlit==1.12.0
    - taming-transformers-rom1504
    - test-tube>=0.7.5
    - torch-fidelity==0.3.0
    - torchmetrics==0.7.0
    - transformers==4.21.3
    - git+https://github.com/openai/CLIP.git@main#egg=clip
    - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
    - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
    - -e .
environments-and-requirements/environment-mac.yml (new file, 64 lines)

````diff
@@ -0,0 +1,64 @@
+name: invokeai
+channels:
+  - pytorch
+  - conda-forge
+  - defaults
+dependencies:
+  - python=3.10
+  - pip>=22.2
+  - pytorch=1.12
+  - pytorch-lightning=1.7
+  - torchvision=0.13
+  - torchmetrics=0.10
+  - torch-fidelity=0.3
+
+  # I suggest to keep the other deps sorted for convenience.
+  # To determine what the latest versions should be, run:
+  #
+  # ```shell
+  # sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml
+  # CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} '
+  # ```
+
+  - albumentations=1.2
+  - coloredlogs=15.0
+  - diffusers=0.6
+  - einops=0.3
+  - eventlet
+  - grpcio=1.46
+  - flask=2.1
+  - flask-socketio=5.3
+  - flask-cors=3.0
+  - humanfriendly=10.0
+  - imageio=2.21
+  - imageio-ffmpeg=0.4
+  - imgaug=0.4
+  - kornia=0.6
+  - mpmath=1.2
+  - nomkl=3
+  - numpy=1.23
+  - omegaconf=2.1
+  - openh264=2.3
+  - onnx=1.12
+  - onnxruntime=1.12
+  - pudb=2019.2
+  - protobuf=3.20
+  - py-opencv=4.6
+  - scipy=1.9
+  - streamlit=1.12
+  - sympy=1.10
+  - send2trash=1.8
+  - tensorboard=2.10
+  - transformers=4.23
+  - pip:
+    - getpass_asterisk
+    - taming-transformers-rom1504
+    - test-tube==0.7.5
+    - git+https://github.com/openai/CLIP.git@main#egg=clip
+    - git+https://github.com/invoke-ai/k-diffusion.git@mps#egg=k_diffusion
+    - git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
+    - git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
+    - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
+    - -e .
+variables:
+  PYTORCH_ENABLE_MPS_FALLBACK: 1
````
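For context, a minimal sketch of creating this environment on an Apple Silicon Mac, reusing the `CONDA_SUBDIR=osx-arm64` convention from the update comment embedded in the file (the activate step is ordinary conda usage, not something the diff specifies):

```shell
# Sketch: create and activate the Mac environment defined above.
CONDA_SUBDIR=osx-arm64 conda env create -f environments-and-requirements/environment-mac.yml
conda activate invokeai  # env name comes from the file's `name:` key
```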
environments-and-requirements/environment-win-cuda.yml (new file, 46 lines)

```diff
@@ -0,0 +1,46 @@
+name: invokeai
+channels:
+  - pytorch
+  - conda-forge
+  - defaults
+dependencies:
+  - python>=3.9
+  - pip=22.2.2
+  - numpy=1.23.3
+  - torchvision=0.13.1
+  - torchaudio=0.12.1
+  - pytorch=1.12.1
+  - cudatoolkit=11.6
+  - pip:
+    - albumentations==0.4.3
+    - basicsr==1.4.1
+    - dependency_injector==4.40.0
+    - diffusers==0.6.0
+    - einops==0.3.0
+    - eventlet
+    - flask==2.1.3
+    - flask_cors==3.0.10
+    - flask_socketio==5.3.0
+    - getpass_asterisk
+    - gfpgan
+    - imageio-ffmpeg==0.4.2
+    - imageio==2.9.0
+    - kornia==0.6.0
+    - omegaconf==2.2.3
+    - opencv-python==4.5.5.64
+    - pillow==9.2.0
+    - pudb==2019.2
+    - pyreadline3
+    - pytorch-lightning==1.7.7
+    - realesrgan
+    - send2trash==1.8.0
+    - streamlit==1.12.0
+    - taming-transformers-rom1504
+    - test-tube>=0.7.5
+    - torch-fidelity==0.3.0
+    - torchmetrics==0.7.0
+    - transformers==4.21.3
+    - git+https://github.com/openai/CLIP.git@main#egg=clip
+    - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
+    - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
+    - -e .
```
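Likewise, a hedged sketch of standing up the Windows CUDA environment from the new file (assumes conda on PATH and a GPU/driver stack compatible with the pinned cudatoolkit=11.6):

```shell
# Sketch: create the Windows CUDA environment defined above.
conda env create -f environments-and-requirements/environment-win-cuda.yml
conda activate invokeai
```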
```diff
@@ -1,41 +1,36 @@
---prefer-binary
-
-albumentations
-einops
-getpass_asterisk
-huggingface-hub
-imageio-ffmpeg
-imageio
-kornia
 # pip will resolve the version which matches torch
+albumentations
+dependency_injector==4.40.0
+diffusers
+einops
+eventlet
+flask==2.1.3
+flask_cors==3.0.10
+flask_socketio==5.3.0
+flaskwebgui==0.3.7
+getpass_asterisk
+gfpgan
+huggingface-hub
+imageio
+imageio-ffmpeg
+kornia
 numpy
 omegaconf
 opencv-python
 pillow
 pip>=22
 pudb
-pytorch-lightning==1.7.7
-scikit-image>=0.19
-streamlit
 pyreadline3
-# "CompVis/taming-transformers" IS NOT INSTALLABLE
-# This is a drop-in replacement
+pytorch-lightning==1.7.7
+realesrgan
+scikit-image>=0.19
+send2trash
+streamlit
 taming-transformers-rom1504
 test-tube
 torch-fidelity
 torchmetrics
 transformers==4.21.*
-flask==2.1.3
-flask_socketio==5.3.0
-flask_cors==3.0.10
-flaskwebgui==0.3.7
-send2trash
-dependency_injector==4.40.0
-eventlet
-realesrgan
-diffusers
 git+https://github.com/openai/CLIP.git@main#egg=clip
 git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
-git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
-git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
--e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
+git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
```
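Worth noting about this reorganization (an inference from the hunks, not diff text): the shared base list stays torch-agnostic — hence the retained comment `# pip will resolve the version which matches torch` — while the platform files below supply the hardware-appropriate torch pins. With pip >= 22.2 (consistent with the `pip>=22` pin above), the layered result can be previewed without installing:

```shell
# Sketch: dry-run a platform file to see what the layered requirements resolve to
# (requirements-lin-cuda.txt is introduced a few hunks below).
pip install --dry-run -r environments-and-requirements/requirements-lin-cuda.txt
```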
```diff
@@ -1,4 +1,4 @@
--r requirements.txt
+-r environments-and-requirements/requirements-base.txt
 
 # Get hardware-appropriate torch/torchvision
 --extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
```
environments-and-requirements/requirements-lin-arm64.txt (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+--pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
+-r environments-and-requirements/requirements-base.txt
+-e .
```
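The first line of this new file leans on the fact that pip reads global options out of a requirements file; a rough command-line equivalent of what it requests (standard pip behavior, not something the diff states):

```shell
# Sketch: what the nightly-CPU line above amounts to when run by hand.
pip install --pre torch torchvision torchaudio \
    --extra-index-url https://download.pytorch.org/whl/nightly/cpu
```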
environments-and-requirements/requirements-lin-cuda.txt (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+-r environments-and-requirements/requirements-base.txt
+-e .
```
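Because the platform files chain into the shared base list via `-r`, a full install on CUDA Linux reduces to one command (a sketch assuming the repo root as the working directory, since the file ends with `-e .`):

```shell
# Sketch: install everything for CUDA Linux via the layered requirements files.
pip install -r environments-and-requirements/requirements-lin-cuda.txt
```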
```diff
@@ -1,4 +1,4 @@
--r requirements.txt
+-r environments-and-requirements/requirements-base.txt
 
 protobuf==3.19.6
 torch<1.13.0
```
```diff
@@ -1,7 +1,8 @@
--r requirements.txt
+-r environments-and-requirements/requirements-base.txt
 
 # Get hardware-appropriate torch/torchvision
 --extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
+basicsr==1.4.1
 torch==1.12.1
 torchvision==0.13.1
 -e .
```