Compare commits
71 Commits
development
...
2.1.3-rc7
| SHA1 |
| --- |
| 9a1fe8e7fb |
| ff56f5251b |
| ed943bd6c7 |
| 7ad2355b1d |
| 66c920fc19 |
| 3fc5cb09f8 |
| 1345ec77ab |
| b116715490 |
| fa3670270e |
| c304250ef6 |
| 802ce5dde5 |
| 311ee320ec |
| e9df17b374 |
| 061fb4ef00 |
| 52be0d2396 |
| 4095acd10e |
| 201eb22d76 |
| 17ab982200 |
| a04965b0e9 |
| 0b529f0c57 |
| 6f9f848345 |
| 918c1589ef |
| 116415b3fc |
| b4b6eabaac |
| 4ef1f4a854 |
| 510fc4ebaa |
| a20914434b |
| 0d134195fd |
| 649d8c8573 |
| a358d370a0 |
| 94a9033c4f |
| 18a947c503 |
| a23b031895 |
| 23af68c7d7 |
| e258beeb51 |
| 7460c069b8 |
| e481bfac61 |
| 5040747c67 |
| d1ab65a431 |
| af4ee7feb8 |
| 764fb29ade |
| 1014d3ba44 |
| 40a48aca88 |
| 92abc00f16 |
| a5719aabf8 |
| 44a18511fa |
| b850dbadaf |
| 9ef8b944d5 |
| efc5a98488 |
| 1417c87928 |
| 2dd6fc2b93 |
| 22213612a0 |
| 71ee44a827 |
| b17ca0a5e7 |
| 71bbfe4a1a |
| 5702271991 |
| 10781e7dc4 |
| 099d1157c5 |
| ab825bf7ee |
| 10cfeb5ada |
| e97515d045 |
| 0f04bc5789 |
| 3f74aabecd |
| b1a99a51b7 |
| 8004f8a6d9 |
| ff8ff2212a |
| 8e5363cd83 |
| 1450779146 |
| 8cd5d95b8a |
| abd6407394 |
| 734dacfbe9 |
.github/CODEOWNERS (1 change, vendored)

@@ -2,3 +2,4 @@ ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @tildebyte
.github/workflows/build-container.yml (4 changes, vendored)

@@ -17,9 +17,9 @@ jobs:
- aarch64
include:
- arch: x86_64
conda-env-file: environment.yml
conda-env-file: environment-lin-cuda.yml
- arch: aarch64
conda-env-file: environment-linux-aarch64.yml
conda-env-file: environment-lin-aarch64.yml
runs-on: ubuntu-latest
name: ${{ matrix.arch }}
steps:
.github/workflows/test-invoke-conda.yml (7 changes, vendored)

@@ -23,7 +23,7 @@ jobs:
- macOS-12
include:
- os: ubuntu-latest
environment-file: environment.yml
environment-file: environment-lin-cuda.yml
default-shell: bash -l {0}
- os: macOS-12
environment-file: environment-mac.yml

@@ -49,6 +49,9 @@ jobs:
- name: create models.yaml from example
run: cp configs/models.yaml.example configs/models.yaml

- name: create environment.yml
run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml

- name: Use cached conda packages
id: use-cached-conda-packages
uses: actions/cache@v3

@@ -61,7 +64,7 @@ jobs:
uses: conda-incubator/setup-miniconda@v2
with:
activate-environment: ${{ env.CONDA_ENV_NAME }}
environment-file: ${{ matrix.environment-file }}
environment-file: environment.yml
miniconda-version: latest

- name: set test prompt to main branch validation
.gitignore (23 changes, vendored)

@@ -194,6 +194,10 @@ checkpoints

# Let the frontend manage its own gitignore
!frontend/*
frontend/apt-get
frontend/dist
frontend/sudo
frontend/update

# Scratch folder
.scratch/

@@ -201,6 +205,7 @@ checkpoints
gfpgan/
models/ldm/stable-diffusion-v1/*.sha256

# GFPGAN model files
gfpgan/

@@ -209,6 +214,24 @@ configs/models.yaml

# weights (will be created by installer)
models/ldm/stable-diffusion-v1/*.ckpt
models/clipseg
models/gfpgan

# ignore initfile
invokeai.init

# ignore environment.yml and requirements.txt
# these are links to the real files in environments-and-requirements
environment.yml
requirements.txt

# source installer files
source_installer/*zip
source_installer/invokeAI
install.bat
install.sh
update.bat
update.sh

# this may be present if the user created a venv
invokeai
(17 binary image assets in the docs changed in this comparison, most likely moved or renamed: their widths, heights, and file sizes are identical before and after.)
@@ -43,33 +43,42 @@ RUN apt-get update \
ARG invokeai_git=invoke-ai/InvokeAI
ARG invokeai_branch=main
ARG project_name=invokeai
RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git /${project_name} \
  && cp /${project_name}/configs/models.yaml.example /${project_name}/configs/models.yaml \
  && ln -s /data/models/v1-5-pruned-emaonly.ckpt /${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt \
  && ln -s /data/outputs/ /${project_name}/outputs
ARG conda_env_file=environment-lin-cuda.yml
RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
  && cp \
    "/${project_name}/configs/models.yaml.example" \
    "/${project_name}/configs/models.yaml" \
  && ln -sf \
    "/${project_name}/environments-and-requirements/${conda_env_file}" \
    "/${project_name}/environment.yml" \
  && ln -sf \
    /data/models/v1-5-pruned-emaonly.ckpt \
    "/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
  && ln -sf \
    /data/outputs/ \
    "/${project_name}/outputs"

# set workdir
WORKDIR /${project_name}
WORKDIR "/${project_name}"

# install conda env and preload models
ARG conda_prefix=/opt/conda
ARG conda_env_file=environment.yml
COPY --from=get_miniconda ${conda_prefix} ${conda_prefix}
RUN source ${conda_prefix}/etc/profile.d/conda.sh \
COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
  && conda init bash \
  && source ~/.bashrc \
  && conda env create \
    --name ${project_name} \
    --file ${conda_env_file} \
    --name "${project_name}" \
  && rm -Rf ~/.cache \
  && conda clean -afy \
  && echo "conda activate ${project_name}" >> ~/.bashrc \
  && conda activate ${project_name} \
  && echo "conda activate ${project_name}" >> ~/.bashrc

RUN source ~/.bashrc \
  && python scripts/preload_models.py \
    --no-interactive

# Copy entrypoint and set env
ENV CONDA_PREFIX=${conda_prefix}
ENV PROJECT_NAME=${project_name}
ENV CONDA_PREFIX="${conda_prefix}"
ENV PROJECT_NAME="${project_name}"
COPY docker-build/entrypoint.sh /
ENTRYPOINT [ "/entrypoint.sh" ]
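These build arguments are normally supplied by `docker-build/build.sh`; driven by hand, a build might look roughly like this (a sketch — the Dockerfile path, tag, and argument values are assumptions rather than values taken from the repository):

```bash
docker build . \
  --file docker-build/Dockerfile \
  --build-arg invokeai_branch=main \
  --build-arg conda_env_file=environment-lin-cuda.yml \
  --tag invokeai-x86_64
```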
@@ -8,7 +8,7 @@ source ./docker-build/env.sh || echo "please run from repository root" || exit 1

invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml}
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
invokeai_branch=${INVOKEAI_BRANCH:-main}
huggingface_token=${HUGGINGFACE_TOKEN?}
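Because each of these settings uses bash's `${VARIABLE:-default}` expansion (and `${HUGGINGFACE_TOKEN?}`, which aborts if the token is unset), they can all be overridden from the environment when invoking the build script, e.g. (a sketch):

```bash
# HUGGINGFACE_TOKEN is required; everything else falls back to the defaults above
HUGGINGFACE_TOKEN="hg_xxxxxxxx" \
INVOKEAI_BRANCH="development" \
./docker-build/build.sh
```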
@ -19,13 +19,13 @@ tree on a hill with a river, nature photograph, national geographic -I./test-pic
|
||||
This will take the original image shown here:
|
||||
|
||||
<figure markdown>
|
||||

|
||||
{ width=320 }
|
||||
</figure>
|
||||
|
||||
and generate a new image based on it as shown here:
|
||||
|
||||
<figure markdown>
|
||||

|
||||
{ width=320 }
|
||||
</figure>
|
||||
|
||||
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
|
||||
@ -45,15 +45,16 @@ Note that the prompt makes a big difference. For example, this slight variation
|
||||
on the prompt produces a very different image:
|
||||
|
||||
<figure markdown>
|
||||

|
||||
{ width=320 }
|
||||
<caption markdown>photograph of a tree on a hill with a river</caption>
|
||||
</figure>
|
||||
|
||||
!!! tip
|
||||
|
||||
When designing prompts, think about how the images scraped from the internet were captioned. Very few photographs will
|
||||
be labeled "photograph" or "photorealistic." They will, however, be captioned with the publication, photographer, camera
|
||||
model, or film settings.
|
||||
When designing prompts, think about how the images scraped from the internet were
|
||||
captioned. Very few photographs will be labeled "photograph" or "photorealistic."
|
||||
They will, however, be captioned with the publication, photographer, camera model,
|
||||
or film settings.
|
||||
|
||||
If the initial image contains transparent regions, then Stable Diffusion will
|
||||
only draw within the transparent regions, a process called
|
||||
@ -61,17 +62,17 @@ only draw within the transparent regions, a process called
|
||||
However, for this to work correctly, the color information underneath the
|
||||
transparent regions needs to be preserved, not erased.
|
||||
|
||||
!!! warning
|
||||
!!! warning "**IMPORTANT ISSUE** "
|
||||
|
||||
**IMPORTANT ISSUE** `img2img` does not work properly on initial images smaller
|
||||
than 512x512. Please scale your image to at least 512x512 before using it.
|
||||
Larger images are not a problem, but may run out of VRAM on your GPU card. To
|
||||
fix this, use the --fit option, which downscales the initial image to fit within
|
||||
the box specified by width x height:
|
||||
`img2img` does not work properly on initial images smaller
|
||||
than 512x512. Please scale your image to at least 512x512 before using it.
|
||||
Larger images are not a problem, but may run out of VRAM on your GPU card. To
|
||||
fix this, use the --fit option, which downscales the initial image to fit within
|
||||
the box specified by width x height:
|
||||
|
||||
```
|
||||
tree on a hill with a river, national geographic -I./test-pictures/big-sketch.png -H512 -W512 --fit
|
||||
```
|
||||
```
|
||||
tree on a hill with a river, national geographic -I./test-pictures/big-sketch.png -H512 -W512 --fit
|
||||
```
|
||||
|
||||
## How does it actually work, though?
|
||||
|
||||
@ -87,7 +88,7 @@ from a prompt. If the step count is 10, then the "latent space" (Stable
|
||||
Diffusion's internal representation of the image) for the prompt "fire" with
|
||||
seed `1592514025` develops something like this:
|
||||
|
||||
```commandline
|
||||
```bash
|
||||
invoke> "fire" -s10 -W384 -H384 -S1592514025
|
||||
```
|
||||
|
||||
@ -133,9 +134,9 @@ Notice how much more fuzzy the starting image is for strength `0.7` compared to
|
||||
|
||||
| | strength = 0.7 | strength = 0.4 |
|
||||
| --------------------------- | ------------------------------------------------------------- | ------------------------------------------------------------- |
|
||||
| initial image that SD sees |  |  |
|
||||
| initial image that SD sees |  |  |
|
||||
| steps argument to `invoke>` | `-S10` | `-S10` |
|
||||
| steps actually taken | 7 | 4 |
|
||||
| steps actually taken | `7` | `4` |
|
||||
| latent space at each step |  |  |
|
||||
| output |  |  |
|
||||
|
||||
@ -150,7 +151,7 @@ If you want to try this out yourself, all of these are using a seed of
|
||||
`1592514025` with a width/height of `384`, step count `10`, the default sampler
|
||||
(`k_lms`), and the single-word prompt `"fire"`:
|
||||
|
||||
```commandline
|
||||
```bash
|
||||
invoke> "fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png --strength 0.7
|
||||
```
|
||||
|
||||
@ -170,7 +171,7 @@ give each generation 20 steps.
|
||||
Here's strength `0.4` (note step count `50`, which is `20 ÷ 0.4` to make sure SD
|
||||
does `20` steps from my image):
|
||||
|
||||
```commandline
|
||||
```bash
|
||||
invoke> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
|
||||
```
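If you want to compare several strength values in a single run, you can also put the variations into a prompt file and feed it to the CLI with `--from_file` (a sketch; the file path is illustrative and the switches mirror the examples above):

```bash
# two variations of the same prompt and seed, differing only in strength
cat > /tmp/fire-strengths.txt <<'EOF'
"fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7
"fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
EOF

# run both prompts non-interactively
python scripts/invoke.py --from_file /tmp/fire-strengths.txt
```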
|
||||
|
||||
|
@ -92,6 +92,21 @@ The new image is larger than the original (576x704) because 64 pixels were added
|
||||
to the top and right sides. You will need enough VRAM to process an image of
|
||||
this size.
|
||||
|
||||
#### Outcropping non-InvokeAI images
|
||||
|
||||
You can outcrop an arbitrary image that was not generated by InvokeAI,
|
||||
but your results will vary. The `inpainting-1.5` model is highly
|
||||
recommended, but if that is not feasible, you may be able to improve the
|
||||
output by conditioning the outcropping with a text prompt that
|
||||
describes the scene using the `--new_prompt` argument:
|
||||
|
||||
```bash
|
||||
invoke> !fix images/vacation.png --outcrop top 128 --new_prompt "family vacation"
|
||||
```
|
||||
|
||||
You may also provide a different seed for outcropping to use by passing
|
||||
`-S<seed>`. A negative seed will generate a new random seed.
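For example, the outcrop direction, replacement prompt, and seed can be combined in a single `!fix` call (values are illustrative):

```bash
invoke> !fix images/vacation.png --outcrop top 128 --new_prompt "family vacation" -S1234567
```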
|
||||
|
||||
A number of caveats:
|
||||
|
||||
1. Although you can specify any pixel values, they will be rounded up to the
|
||||
@ -111,7 +126,7 @@ A number of caveats:
|
||||
the border.
|
||||
|
||||
4. When using the `inpaint-1.5` model, you may notice subtle changes to the area
|
||||
within the original image. This is because the model performs an
|
||||
outside the masked region. This is because the model performs an
|
||||
encoding/decoding on the image as a whole. This does not occur with the
|
||||
standard model.
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
title: WebUI Hotkey List
|
||||
---
|
||||
|
||||
# **WebUI Hotkey List**
|
||||
# :material-keyboard: **WebUI Hotkey List**
|
||||
|
||||
## General
|
||||
|
||||
@ -19,7 +19,7 @@ title: WebUI Hotkey List
|
||||
| ++ctrl+enter++ | Start processing |
|
||||
| ++shift+x++ | cancel Processing |
|
||||
| ++shift+d++ | Toggle Dark Mode |
|
||||
| ` | Toggle console |
|
||||
| ++"`"++ | Toggle console |
|
||||
|
||||
## Tabs
|
||||
|
||||
@ -48,10 +48,10 @@ title: WebUI Hotkey List
|
||||
|
||||
| Setting | Hotkey |
|
||||
| ---------------------------- | --------------------- |
|
||||
| [ | Decrease brush size |
|
||||
| ] | Increase brush size |
|
||||
| alt + [ | Decrease mask opacity |
|
||||
| alt + ] | Increase mask opacity |
|
||||
| ++"["++ | Decrease brush size |
|
||||
| ++"]"++ | Increase brush size |
|
||||
| ++alt+"["++ | Decrease mask opacity |
|
||||
| ++alt+"]"++ | Increase mask opacity |
|
||||
| ++b++ | Select brush |
|
||||
| ++e++ | Select eraser |
|
||||
| ++ctrl+z++ | Undo brush stroke |
|
||||
|
@@ -94,6 +94,7 @@ installation instructions below.

You will need one of the following:

- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

### :fontawesome-solid-memory: Memory
@ -4,26 +4,30 @@ title: Docker
|
||||
|
||||
# :fontawesome-brands-docker: Docker
|
||||
|
||||
## Before you begin
|
||||
!!! warning "For end users"
|
||||
|
||||
- For end users: Install Stable Diffusion locally using the instructions for
|
||||
your OS.
|
||||
- For developers: For container-related development tasks or for enabling easy
|
||||
deployment to other environments (on-premises or cloud), follow these
|
||||
instructions. For general use, install locally to leverage your machine's GPU.
|
||||
We highly recommend installing InvokeAI locally using [these instructions](index.md).
|
||||
|
||||
!!! tip "For developers"
|
||||
|
||||
For container-related development tasks or for enabling easy
|
||||
deployment to other environments (on-premises or cloud), follow these
|
||||
instructions.
|
||||
|
||||
For general use, install locally to leverage your machine's GPU.
|
||||
|
||||
## Why containers?
|
||||
|
||||
They provide a flexible, reliable way to build and deploy Stable Diffusion.
|
||||
You'll also use a Docker volume to store the largest model files and image
|
||||
outputs as a first step in decoupling storage and compute. Future enhancements
|
||||
can do this for other assets. See [Processes](https://12factor.net/processes)
|
||||
under the Twelve-Factor App methodology for details on why running applications
|
||||
in such a stateless fashion is important.
|
||||
They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
|
||||
use a Docker volume to store the largest model files and image outputs as a
|
||||
first step in decoupling storage and compute. Future enhancements can do this
|
||||
for other assets. See [Processes](https://12factor.net/processes) under the
|
||||
Twelve-Factor App methodology for details on why running applications in such a
|
||||
stateless fashion is important.
|
||||
|
||||
You can specify the target platform when building the image and running the
|
||||
container. You'll also need to specify the Stable Diffusion requirements file
|
||||
that matches the container's OS and the architecture it will run on.
|
||||
container. You'll also need to specify the InvokeAI requirements file that
|
||||
matches the container's OS and the architecture it will run on.
|
||||
|
||||
Developers on Apple silicon (M1/M2): You
|
||||
[can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224)
|
||||
@ -38,16 +42,19 @@ another environment with NVIDIA GPUs on-premises or in the cloud.
|
||||
|
||||
#### Install [Docker](https://github.com/santisbon/guides#docker)
|
||||
|
||||
On the Docker Desktop app, go to Preferences, Resources, Advanced. Increase the
|
||||
CPUs and Memory to avoid this
|
||||
On the [Docker Desktop app](https://docs.docker.com/get-docker/), go to
|
||||
Preferences, Resources, Advanced. Increase the CPUs and Memory to avoid this
|
||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to
|
||||
increase Swap and Disk image size too.
|
||||
|
||||
#### Get a Huggingface-Token
|
||||
|
||||
Go to [Hugging Face](https://huggingface.co/settings/tokens), create a token and
|
||||
temporary place it somewhere like a open texteditor window (but dont save it!,
|
||||
only keep it open, we need it in the next step)
|
||||
Besides the Docker agent, you will need an account on
|
||||
[huggingface.co](https://huggingface.co/join).
|
||||
|
||||
After you have successfully registered your account, go to
|
||||
[huggingface.co/settings/tokens](https://huggingface.co/settings/tokens), create
|
||||
a token and copy it, since you will need it for the next step.
|
||||
|
||||
### Setup
|
||||
|
||||
@ -65,13 +72,14 @@ created in the last step.
|
||||
|
||||
Some suggestions of variables you may want to change besides the token:
|
||||
|
||||
| Environment-Variable | Description |
|
||||
| ------------------------------------------------------------------- | ------------------------------------------------------------------------ |
|
||||
| `HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"` | This is the only required variable, without you can't get the checkpoint |
|
||||
| `ARCH=aarch64` | if you are using a ARM based CPU |
|
||||
| `INVOKEAI_TAG=yourname/invokeai:latest` | the Container Repository / Tag which will be used |
|
||||
| `INVOKEAI_CONDA_ENV_FILE=environment-linux-aarch64.yml` | since environment.yml wouldn't work with aarch |
|
||||
| `INVOKEAI_GIT="-b branchname https://github.com/username/reponame"` | if you want to use your own fork |
|
||||
| Environment-Variable      | Default value                 | Description                                                                               |
| ------------------------- | ----------------------------- | ----------------------------------------------------------------------------------------- |
| `HUGGINGFACE_TOKEN`       | No default, but **required**! | The only **required** variable; without it you can't get the checkpoint                    |
| `ARCH`                    | x86_64                        | Set this if you are using an ARM-based CPU                                                 |
| `INVOKEAI_TAG`            | invokeai-x86_64               | The container repository / tag which will be used                                          |
| `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml      | The conda environment file to use (change this e.g. on aarch64, where the default won't work) |
| `INVOKEAI_GIT`            | invoke-ai/InvokeAI            | The repository to use                                                                      |
| `INVOKEAI_BRANCH`         | main                          | The branch to check out                                                                    |
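For example, you might export the required token plus any optional overrides in your shell before running the build script (the token value is the same placeholder used earlier; all values are illustrative):

```bash
export HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"   # required
export ARCH="aarch64"                                 # only on ARM-based machines
export INVOKEAI_TAG="yourname/invokeai:latest"        # optional
export INVOKEAI_BRANCH="development"                  # optional
```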
|
||||
|
||||
#### Build the Image
|
||||
|
||||
@ -79,25 +87,41 @@ I provided a build script, which is located in `docker-build/build.sh` but still
|
||||
needs to be executed from the Repository root.
|
||||
|
||||
```bash
|
||||
docker-build/build.sh
|
||||
./docker-build/build.sh
|
||||
```
|
||||
|
||||
The build Script not only builds the container, but also creates the docker
|
||||
volume if not existing yet, or if empty it will just download the models. When
|
||||
it is done you can run the container via the run script
|
||||
volume if not existing yet, or if empty it will just download the models.
|
||||
|
||||
#### Run the Container
|
||||
|
||||
After the build process is done, you can run the container via the provided
|
||||
`docker-build/run.sh` script
|
||||
|
||||
```bash
|
||||
docker-build/run.sh
|
||||
./docker-build/run.sh
|
||||
```
|
||||
|
||||
When used without arguments, the container will start the website and provide
|
||||
When used without arguments, the container will start the webserver and provide
|
||||
you the link to open it. But if you want to use some other parameters you can
|
||||
also do so.
|
||||
|
||||
!!! example ""
|
||||
|
||||
```bash
|
||||
./docker-build/run.sh --from_file tests/validate_pr_prompt.txt
|
||||
```
|
||||
|
||||
The output folder is located on the volume which is also used to store the model.
|
||||
|
||||
Find out more about available CLI-Parameters at [features/CLI.md](../features/CLI.md/#arguments)
|
||||
|
||||
---
|
||||
|
||||
!!! warning "Deprecated"
|
||||
|
||||
From here on it is the rest of the previous Docker-Docs, which will still
|
||||
provide usefull informations for one or the other.
|
||||
From here on you will find the previous Docker docs, which still
provide some useful information.
|
||||
|
||||
## Usage (time to have fun)
|
||||
|
||||
|
55
docs/installation/INSTALL_INVOKE.md
Normal file
@ -0,0 +1,55 @@
|
||||
---
|
||||
title: InvokeAI Installer
|
||||
---
|
||||
|
||||
The InvokeAI installer is a shell script that will install InvokeAI onto a stock
|
||||
computer running recent versions of Linux, MacOSX or Windows. It will leave you
|
||||
with a version that runs a stable version of InvokeAI. When a new version of
|
||||
InvokeAI is released, you will download and reinstall the new version.
|
||||
|
||||
If you wish to tinker with unreleased versions of InvokeAI that introduce
|
||||
potentially unstable new features, you should consider using the
|
||||
[source installer](INSTALL_SOURCE.md) or one of the
|
||||
[manual install](INSTALL_MANUAL.md) methods.
|
||||
|
||||
!!! todo
|
||||
|
||||
Before you begin, make sure that you meet
|
||||
the [hardware requirements](/#hardware-requirements) and have the
|
||||
appropriate GPU drivers installed. In particular, if you are a Linux user with
|
||||
an AMD GPU installed, you may need to install the
|
||||
[ROCm-driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
||||
|
||||
Installation requires roughly 18G of free disk space to load the libraries and
|
||||
recommended model weights files.
|
||||
|
||||
## Steps to Install
|
||||
|
||||
1. Download the
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
|
||||
InvokeAI's installer for your platform
|
||||
|
||||
2. Place the downloaded package someplace where you have plenty of HDD space,
|
||||
and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)
|
||||
|
||||
3. Extract the 'InvokeAI' folder from the downloaded package
|
||||
|
||||
4. Open the extracted 'InvokeAI' folder
|
||||
|
||||
5. Double-click 'install.bat' (Windows), or 'install.sh' (Lin/Mac) (or run from
|
||||
a terminal)
|
||||
|
||||
6. Follow the prompts
|
||||
|
||||
7. After installation, please run the 'invoke.bat' file (on Windows) or
|
||||
'invoke.sh' file (on Linux/Mac) to start InvokeAI.
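On Linux or macOS, steps 4 through 7 from a terminal might look like this (a sketch; the folder name comes from the extracted package):

```bash
cd InvokeAI     # the folder extracted in step 3
./install.sh    # run the installer and follow the prompts
./invoke.sh     # start InvokeAI once installation has finished
```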
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you run into problems during or after installation, the InvokeAI team is
|
||||
available to help you. Either create an
|
||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
||||
make a request for help on the "bugs-and-support" channel of our
|
||||
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
||||
organization, but typically somebody will be available to help you within 24
|
||||
hours, and often much sooner.
|
27
docs/installation/INSTALL_JUPYTER.md
Normal file
@ -0,0 +1,27 @@
|
||||
---
|
||||
title: Running InvokeAI on Google Colab using a Jupyter Notebook
|
||||
---
|
||||
|
||||
# THIS NEEDS TO BE FLESHED OUT
|
||||
|
||||
## Introduction
|
||||
|
||||
We have a [Jupyter
|
||||
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
|
||||
with cell-by-cell installation steps. It will download the code in
|
||||
this repo as one of the steps, so instead of cloning this repo, simply
|
||||
download the notebook from the link above and load it up in VSCode
|
||||
(with the appropriate extensions installed)/Jupyter/JupyterLab and
|
||||
start running the cells one-by-one.
|
||||
|
||||
!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"
|
||||
|
||||
## Walkthrough
|
||||
|
||||
## Updating to newer versions
|
||||
|
||||
### Updating the stable version
|
||||
|
||||
### Updating to the development version
|
||||
|
||||
## Troubleshooting
|
416
docs/installation/INSTALL_MANUAL.md
Normal file
@ -0,0 +1,416 @@
|
||||
---
|
||||
title: Manual Installation
|
||||
---
|
||||
|
||||
<figure markdown>
|
||||
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
|
||||
</figure>
|
||||
|
||||
!!! warning "This is for advanced Users"
|
||||
|
||||
who are already expirienced with using conda or pip
|
||||
|
||||
## Introduction
|
||||
|
||||
You have two choices for manual installation, the [first one](#Conda_method)
|
||||
based on the Anaconda3 package manager (`conda`), and
|
||||
[a second one](#PIP_method) which uses basic Python virtual environment (`venv`)
|
||||
commands and the PIP package manager. Both methods require you to enter commands
|
||||
on the terminal, also known as the "console".
|
||||
|
||||
On Windows systems you are encouraged to install and use the
|
||||
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
|
||||
which provides compatibility with Linux and Mac shells and nice features such as
|
||||
command-line completion.
|
||||
|
||||
### Conda method
|
||||
|
||||
1. Check that your system meets the
|
||||
[hardware requirements](index.md#Hardware_Requirements) and has the
|
||||
appropriate GPU drivers installed. In particular, if you are a Linux user
|
||||
with an AMD GPU installed, you may need to install the
|
||||
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
||||
|
||||
InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
|
||||
of ROCm driver support on this platform.
|
||||
|
||||
To confirm that the appropriate drivers are installed, run `nvidia-smi` on
|
||||
NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
|
||||
information about the installed video card.
|
||||
|
||||
Macintosh users with MPS acceleration, or anybody with a CPU-only system,
|
||||
can skip this step.
|
||||
|
||||
2. You will need to install Anaconda3 and Git if they are not already
|
||||
available. Use your operating system's preferred package manager, or
|
||||
download the installers manually. You can find them here:
|
||||
|
||||
- [Anaconda3](https://www.anaconda.com/)
|
||||
- [git](https://git-scm.com/downloads)
|
||||
|
||||
3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
|
||||
GitHub:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
```
|
||||
|
||||
This will create InvokeAI folder where you will follow the rest of the
|
||||
steps.
|
||||
|
||||
4. Enter the newly-created InvokeAI folder:
|
||||
|
||||
```bash
|
||||
cd InvokeAI
|
||||
```
|
||||
|
||||
From this step forward make sure that you are working in the InvokeAI
|
||||
directory!
|
||||
|
||||
5. Select the appropriate environment file:
|
||||
|
||||
We have created a series of environment files suited for different operating
|
||||
systems and GPU hardware. They are located in the
|
||||
`environments-and-requirements` directory:
|
||||
|
||||
<figure markdown>
|
||||
|
||||
| filename | OS |
|
||||
| :----------------------: | :----------------------------: |
|
||||
| environment-lin-amd.yml | Linux with an AMD (ROCm) GPU |
|
||||
| environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU |
|
||||
| environment-mac.yml | Macintosh |
|
||||
| environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |
|
||||
|
||||
</figure>
|
||||
|
||||
Choose the appropriate environment file for your system and link or copy it
|
||||
to `environment.yml` in InvokeAI's top-level directory. To do so, run the
following command from the repository root:
|
||||
|
||||
!!! Example ""
|
||||
|
||||
=== "Macintosh and Linux"
|
||||
|
||||
!!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
|
||||
|
||||
```bash
|
||||
ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
|
||||
```
|
||||
|
||||
When this is done, confirm that a file `environment.yml` has been linked in
|
||||
the InvokeAI root directory and that it points to the correct file in the
|
||||
`environments-and-requirements`.
|
||||
|
||||
```bash
|
||||
ls -la
|
||||
```
|
||||
|
||||
=== "Windows"
|
||||
|
||||
!!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
|
||||
|
||||
```cmd
|
||||
copy environments-and-requirements\environment-win-cuda.yml environment.yml
|
||||
```
|
||||
|
||||
Afterwards verify that the file `environment.yml` has been created, either via the
|
||||
explorer or by using the command `dir` from the terminal
|
||||
|
||||
```cmd
|
||||
dir
|
||||
```
|
||||
|
||||
!!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
|
||||
|
||||
6. Create the conda environment:
|
||||
|
||||
```bash
|
||||
conda env update
|
||||
```
|
||||
|
||||
This will create a new environment named `invokeai` and install all InvokeAI
|
||||
dependencies into it. If something goes wrong you should take a look at
|
||||
[troubleshooting](#troubleshooting).
|
||||
|
||||
7. Activate the `invokeai` environment:
|
||||
|
||||
In order to use the newly created environment you will first need to
|
||||
activate it
|
||||
|
||||
```bash
|
||||
conda activate invokeai
|
||||
```
|
||||
|
||||
Your command-line prompt should change to indicate that `invokeai` is active
|
||||
by prepending `(invokeai)`.
|
||||
|
||||
8. Pre-Load the model weights files:
|
||||
|
||||
!!! tip
|
||||
|
||||
If you have already downloaded the weights file(s) for another Stable
|
||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||
process for this is described in [here](INSTALLING_MODELS.md).
|
||||
|
||||
```bash
|
||||
python scripts/preload_models.py
|
||||
```
|
||||
|
||||
The script `preload_models.py` will interactively guide you through the
|
||||
process of downloading and installing the weights files needed for InvokeAI.
|
||||
Note that the main Stable Diffusion weights file is protected by a license
|
||||
agreement that you have to agree to. The script will list the steps you need
|
||||
to take to create an account on the site that hosts the weights files,
|
||||
accept the agreement, and provide an access token that allows InvokeAI to
|
||||
legally download and install the weights files.
|
||||
|
||||
If you get an error message about a module not being installed, check that
|
||||
the `invokeai` environment is active and if not, repeat step 5.
|
||||
|
||||
9. Run the command-line or the web interface:
|
||||
|
||||
!!! example ""
|
||||
|
||||
!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
|
||||
|
||||
=== "CLI"
|
||||
|
||||
```bash
|
||||
python scripts/invoke.py
|
||||
```
|
||||
|
||||
=== "local Webserver"
|
||||
|
||||
```bash
|
||||
python scripts/invoke.py --web
|
||||
```
|
||||
|
||||
=== "Public Webserver"
|
||||
|
||||
```bash
|
||||
python scripts/invoke.py --web --host 0.0.0.0
|
||||
```
|
||||
|
||||
If you choose to run the web interface, point your browser at
|
||||
http://localhost:9090 in order to load the GUI.
|
||||
|
||||
10. Render away!
|
||||
|
||||
Browse the [features](../features/CLI.md) section to learn about all the things you
|
||||
can do with InvokeAI.
|
||||
|
||||
Note that some GPUs are slow to warm up. In particular, when using an AMD
|
||||
card with the ROCm driver, you may have to wait for over a minute the first
|
||||
time you try to generate an image. Fortunately, after the warm up period
|
||||
rendering will be fast.
|
||||
|
||||
11. Subsequently, to relaunch the script, be sure to run "conda activate
|
||||
invokeai", enter the `InvokeAI` directory, and then launch the invoke
|
||||
script. If you forget to activate the 'invokeai' environment, the script
|
||||
will fail with multiple `ModuleNotFound` errors.
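A typical relaunch therefore looks like this (assuming the repository was cloned into `InvokeAI` as in step 3):

```bash
conda activate invokeai
cd InvokeAI
python scripts/invoke.py --web   # omit --web for the command-line interface
```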
|
||||
|
||||
## Updating to newer versions of the script
|
||||
|
||||
This distribution is changing rapidly. If you used the `git clone` method
|
||||
(step 5) to download the InvokeAI directory, then to update to the latest and
|
||||
greatest version, launch the Anaconda window, enter `InvokeAI` and type:
|
||||
|
||||
```bash
|
||||
git pull
|
||||
conda env update
|
||||
python scripts/preload_models.py --no-interactive #optional
|
||||
```
|
||||
|
||||
This will bring your local copy into sync with the remote one. The last step may
|
||||
be needed to take advantage of new features or released models. The
|
||||
`--no-interactive` flag will prevent the script from prompting you to download
|
||||
the big Stable Diffusion weights files.
|
||||
|
||||
## pip Install
|
||||
|
||||
To install InvokeAI with only the PIP package manager, please follow these
|
||||
steps:
|
||||
|
||||
1. Make sure you are using Python 3.9 or higher. The rest of the install
|
||||
procedure depends on this:
|
||||
|
||||
```bash
|
||||
python -V
|
||||
```
|
||||
|
||||
2. Install the `virtualenv` tool if you don't have it already:
|
||||
|
||||
```bash
|
||||
pip install virtualenv
|
||||
```
|
||||
|
||||
3. From within the InvokeAI top-level directory, create and activate a virtual
|
||||
environment named `invokeai`:
|
||||
|
||||
```bash
|
||||
virtualenv invokeai
|
||||
source invokeai/bin/activate
|
||||
```
|
||||
|
||||
4. Pick the correct `requirements*.txt` file for your hardware and operating
|
||||
system.
|
||||
|
||||
We have created a series of environment files suited for different operating
|
||||
systems and GPU hardware. They are located in the
|
||||
`environments-and-requirements` directory:
|
||||
|
||||
<figure markdown>
|
||||
|
||||
| filename | OS |
|
||||
| :---------------------------------: | :-------------------------------------------------------------: |
|
||||
| requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU |
|
||||
| requirements-lin-arm64.txt | Linux running on arm64 systems |
|
||||
| requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU |
|
||||
| requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration |
|
||||
| requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |
|
||||
|
||||
</figure>
|
||||
|
||||
Select the appropriate requirements file, and make a link to it from
|
||||
`requirements.txt` in the top-level InvokeAI directory. The command to do
|
||||
this from the top-level directory is:
|
||||
|
||||
!!! example ""
|
||||
|
||||
=== "Macintosh and Linux"
|
||||
|
||||
!!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
|
||||
|
||||
```bash
|
||||
ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
|
||||
```
|
||||
|
||||
=== "Windows"
|
||||
|
||||
!!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
|
||||
|
||||
```cmd
|
||||
copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
|
||||
```
|
||||
|
||||
!!! warning
|
||||
|
||||
Please do not link or copy `environments-and-requirements/requirements-base.txt`.
|
||||
This is a base requirements file that does not have the platform-specific
|
||||
libraries. Also, be sure to link or copy the platform-specific file to
|
||||
a top-level file named `requirements.txt` as shown here. Running pip on
|
||||
a requirements file in a subdirectory will not work as expected.
|
||||
|
||||
When this is done, confirm that a file named `requirements.txt` has been
|
||||
created in the InvokeAI root directory and that it points to the correct
|
||||
file in `environments-and-requirements`.
|
||||
|
||||
5. Run PIP
|
||||
|
||||
Be sure that the `invokeai` environment is active before doing this:
|
||||
|
||||
```bash
|
||||
pip install --prefer-binary -r requirements.txt
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Here are some common issues and their suggested solutions.
|
||||
|
||||
### Conda
|
||||
|
||||
#### Conda fails before completing `conda update`
|
||||
|
||||
The usual source of these errors is a package incompatibility. While we have
|
||||
tried to minimize these, over time packages get updated and sometimes introduce
|
||||
incompatibilities.
|
||||
|
||||
We suggest that you search
|
||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
|
||||
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
||||
|
||||
You may also try to install the broken packages manually using PIP. To do this,
|
||||
activate the `invokeai` environment, and run `pip install` with the name and
|
||||
version of the package that is causing the incompatibility. For example:
|
||||
|
||||
```bash
|
||||
pip install test-tube==0.7.5
|
||||
```
|
||||
|
||||
You can keep doing this until all requirements are satisfied and the `invoke.py`
|
||||
script runs without errors. Please report to
|
||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
|
||||
to work around the problem so that others can benefit from your investigation.
|
||||
|
||||
#### `preload_models.py` or `invoke.py` crashes at an early stage
|
||||
|
||||
This is usually due to an incomplete or corrupted Conda install. Make sure you
|
||||
have linked to the correct environment file and run `conda update` again.
|
||||
|
||||
If the problem persists, a more extreme measure is to clear Conda's caches and
|
||||
remove the `invokeai` environment:
|
||||
|
||||
```bash
|
||||
conda deactivate
|
||||
conda env remove -n invokeai
|
||||
conda clean -a
|
||||
conda update
|
||||
```
|
||||
|
||||
This removes all cached library files, including ones that may have been
|
||||
corrupted somehow. (This is not supposed to happen, but does anyway).
|
||||
|
||||
#### `invoke.py` crashes at a later stage
|
||||
|
||||
If the CLI or web site had been working ok, but something unexpected happens
|
||||
later on during the session, you've encountered a code bug that is probably
|
||||
unrelated to an install issue. Please search
|
||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
|
||||
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy)
|
||||
|
||||
#### My renders are running very slowly
|
||||
|
||||
You may have installed the wrong torch (machine learning) package, and the
|
||||
system is running on CPU rather than the GPU. To check, look at the log messages
|
||||
that appear when `invoke.py` is first starting up. One of the earlier lines
|
||||
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
|
||||
and on Macintoshes, it should say "mps". If instead the message says it is
|
||||
running on "cpu", then you may need to install the correct torch library.
|
||||
|
||||
You may be able to fix this by installing a different torch library. Here are
|
||||
the magic incantations for Conda and PIP.
|
||||
|
||||
!!! todo "For CUDA systems"
|
||||
|
||||
- conda
|
||||
|
||||
```bash
|
||||
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
|
||||
```
|
||||
|
||||
- pip
|
||||
|
||||
```bash
|
||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
|
||||
```
|
||||
|
||||
!!! todo "For AMD systems"
|
||||
|
||||
- conda
|
||||
|
||||
```bash
|
||||
conda activate invokeai
|
||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||
```
|
||||
|
||||
- pip
|
||||
|
||||
```bash
|
||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||
```
|
||||
|
||||
More information and troubleshooting tips can be found at https://pytorch.org.
|
17
docs/installation/INSTALL_PCP.md
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
title: Installing InvokeAI with the Pre-Compiled PIP Installer
|
||||
---
|
||||
|
||||
# THIS NEEDS TO BE FLESHED OUT
|
||||
|
||||
## Introduction
|
||||
|
||||
## Walkthrough
|
||||
|
||||
## Updating to newer versions
|
||||
|
||||
### Updating the stable version
|
||||
|
||||
### Updating to the development version
|
||||
|
||||
## Troubleshooting
|
156
docs/installation/INSTALL_SOURCE.md
Normal file
@ -0,0 +1,156 @@
|
||||
---
|
||||
title: Source Installer
|
||||
---
|
||||
|
||||
# The InvokeAI Source Installer
|
||||
|
||||
## Introduction
|
||||
|
||||
The source installer is a shell script that attempts to automate every step
|
||||
needed to install and run InvokeAI on a stock computer running recent versions
|
||||
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
|
||||
version of InvokeAI with the option to upgrade to experimental versions later.
|
||||
It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md).
|
||||
|
||||
Before you begin, make sure that you meet the
|
||||
[hardware requirements](index.md#Hardware_Requirements) and have the appropriate
|
||||
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
|
||||
installed, you may need to install the
|
||||
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
||||
|
||||
Installation requires roughly 18G of free disk space to load the libraries and
|
||||
recommended model weights files.
|
||||
|
||||
## Walk through
|
||||
|
||||
Though there are multiple steps, there really is only one click involved to kick
|
||||
off the process.
|
||||
|
||||
1. The source installer is distributed in ZIP files. Go to the
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
||||
look for a series of files named:
|
||||
|
||||
- invokeAI-src-installer-mac.zip
|
||||
- invokeAI-src-installer-windows.zip
|
||||
- invokeAI-src-installer-linux.zip
|
||||
|
||||
Download the one that is appropriate for your operating system.
|
||||
|
||||
2. Unpack the zip file into a directory that has at least 18G of free space. Do
|
||||
_not_ unpack into a directory that has an earlier version of InvokeAI.
|
||||
|
||||
This will create a new directory named "InvokeAI". This example shows how
|
||||
this would look using the `unzip` command-line tool, but you may use any
|
||||
graphical or command-line Zip extractor:
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> unzip invokeAI-windows.zip
|
||||
Archive: C: \Linco\Downloads\invokeAI-linux.zip
|
||||
creating: invokeAI\
|
||||
inflating: invokeAI\install.bat
|
||||
inflating: invokeAI\readme.txt
|
||||
```
|
||||
|
||||
3. If you are using a desktop GUI, double-click the installer file. It will be
|
||||
named `install.bat` on Windows systems and `install.sh` on Linux and
|
||||
Macintosh systems.
|
||||
|
||||
4. Alternatively, from the command line, run the shell script or .bat file:
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> cd invokeAI
|
||||
C:\Documents\Linco\invokeAI> install.bat
|
||||
```
|
||||
|
||||
5. Sit back and let the install script work. It will install various binary
|
||||
requirements including Conda, Git and Python, then download the current
|
||||
InvokeAI code and install it along with its dependencies.
|
||||
|
||||
6. After installation completes, the installer will launch a script called
|
||||
`preload_models.py`, which will guide you through the first-time process of
|
||||
selecting one or more Stable Diffusion model weights files, downloading and
|
||||
configuring them.
|
||||
|
||||
Note that the main Stable Diffusion weights file is protected by a license
|
||||
agreement that you must agree to in order to use it. The script will list the
|
||||
steps you need to take to create an account on the official site that hosts
|
||||
the weights files, accept the agreement, and provide an access token that
|
||||
allows InvokeAI to legally download and install the weights files.
|
||||
|
||||
If you have already downloaded the weights file(s) for another Stable
|
||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||
process for this is described in [Installing Models](INSTALLING_MODELS.md).
|
||||
|
||||
7. The script will now exit and you'll be ready to generate some images. The
|
||||
invokeAI directory will contain numerous files. Look for a shell script
|
||||
named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
|
||||
by double-clicking it or typing its name at the command-line:
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> cd invokeAI
|
||||
C:\Documents\Linco\invokeAI> invoke.bat
|
||||
```
|
||||
|
||||
The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
|
||||
the command-line interface, or (2) the web GUI. If you start the latter, you can
|
||||
load the user interface by pointing your browser at http://localhost:9090.
|
||||
|
||||
The `invoke` script also offers you a third option labeled "open the developer
|
||||
console". If you choose this option, you will be dropped into a command-line
|
||||
interface in which you can run python commands directly, access developer tools,
|
||||
and launch InvokeAI with customized options. To do the latter, you would launch
|
||||
the script `scripts/invoke.py` as shown in this example:
|
||||
|
||||
```cmd
|
||||
python scripts/invoke.py --web --max_load_models=3 \
|
||||
--model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
|
||||
```
|
||||
|
||||
These options are described in detail in the
|
||||
[Command-Line Interface](../features/CLI.md) documentation.
|
||||
|
||||
## Updating to newer versions
|
||||
|
||||
This section describes how to update InvokeAI to new versions of the software.
|
||||
|
||||
### Updating the stable version
|
||||
|
||||
This distribution is changing rapidly, and we add new features on a daily basis.
|
||||
To update to the latest released version (recommended), run the `update.sh`
|
||||
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
||||
release and re-run the `preload_models` script to download any updated models
|
||||
files that may be needed. You can also use this to add additional models that
|
||||
you did not select at installation time.
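For example, from inside the invokeAI directory (sketch):

```bash
./update.sh    # Linux / macOS; use update.bat on Windows
```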
|
||||
|
||||
### Updating to the development version
|
||||
|
||||
There may be times that there is a feature in the `development` branch of
|
||||
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
|
||||
corrects an annoying bug. To do this, you will use the developer's console.
|
||||
|
||||
From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
|
||||
`invoke.bat` (Windows) and select option (3) to open the developer's console.
|
||||
Then run the following command to get the `development branch`:
|
||||
|
||||
```bash
|
||||
git checkout development
|
||||
git pull
|
||||
conda env update
|
||||
```
|
||||
|
||||
You can now close the developer console and run `invoke` as before. If you get
|
||||
complaints about missing models, then you may need to do the additional step of
|
||||
running `preload_models.py`. This happens relatively infrequently. To do this,
|
||||
simply open up the developer's console again and type
|
||||
`python scripts/preload_models.py`.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you run into problems during or after installation, the InvokeAI team is
|
||||
available to help you. Either create an
|
||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
||||
make a request for help on the "bugs-and-support" channel of our
|
||||
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
||||
organization, but typically somebody will be available to help you within 24
|
||||
hours, and often much sooner.
|
59
docs/installation/index.md
Normal file
@ -0,0 +1,59 @@
|
||||
---
|
||||
title: Overview
|
||||
---
|
||||
|
||||
We offer several ways to install InvokeAI, each one suited to your
|
||||
experience and preferences.
|
||||
|
||||
1. [InvokeAI installer](INSTALL_INVOKE.md)
|
||||
|
||||
This is an installer script that installs InvokeAI and all the
|
||||
third party libraries it depends on. When a new version of
|
||||
InvokeAI is released, you will download and reinstall the new
|
||||
version.
|
||||
|
||||
This installer is designed for people who want the system to "just
|
||||
work", don't have an interest in tinkering with it, and do not
|
||||
care about upgrading to unreleased experimental features.
|
||||
|
||||
Note that this script has difficulty on some Macintosh machines
|
||||
that have previously been used for Python development due to
|
||||
conflicting development tools versions. Mac developers may wish
|
||||
to try method (2) or one of the manual methods instead.
|
||||
|
||||
2. [Source code installer](INSTALL_SOURCE.md)
|
||||
|
||||
This is a script that will install InvokeAI and all its essential
|
||||
third party libraries. In contrast to the previous installer, it
|
||||
includes access to a "developer console" which will allow you to
|
||||
access experimental features on the development branch.
|
||||
|
||||
This method is recommended for individuals who wish to stay
|
||||
on the cutting edge of InvokeAI development and are not afraid
|
||||
of occasional breakage.
|
||||
|
||||
3. [Manual Installation](INSTALL_MANUAL.md)
|
||||
|
||||
In this method you will manually run the commands needed to install
|
||||
InvokeAI and its dependencies. We offer two recipes: one suited to
|
||||
those who prefer the `conda` tool, and one suited to those who prefer
|
||||
`pip` and Python virtual environments.
|
||||
|
||||
This method is recommended for users who have previously used `conda`
|
||||
or `pip` in the past, developers, and anyone who wishes to remain on
|
||||
the cutting edge of future InvokeAI development and is willing to put
|
||||
up with occasional glitches and breakage.
|
||||
|
||||
4. [Docker Installation](INSTALL_DOCKER.md)
|
||||
|
||||
We also offer a method for creating Docker containers containing
|
||||
InvokeAI and its dependencies. This method is recommended for
|
||||
individuals who have experience with Docker containers and understand
|
||||
the pluses and minuses of a container-based install.
|
||||
|
||||
5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)
|
||||
|
||||
This method is suitable for running InvokeAI on a Google Colab
|
||||
account. It is recommended for individuals who have previously
|
||||
worked on the Colab and are comfortable with the Jupyter notebook
|
||||
environment.
|
@ -42,14 +42,25 @@ title: Manual Installation, Linux
|
||||
```
|
||||
|
||||
5. Use anaconda to copy necessary python packages, create a new python
|
||||
environment named `invokeai` and activate the environment.
|
||||
environment named `invokeai` and then activate the environment.
|
||||
|
||||
```bash
|
||||
(base) rm -rf src # (this is a precaution in case there is already a src directory)
|
||||
(base) ~/InvokeAI$ conda env create
|
||||
(base) ~/InvokeAI$ conda activate invokeai
|
||||
(invokeai) ~/InvokeAI$
|
||||
```
|
||||
!!! todo "For systems with a CUDA (Nvidia) card:"
|
||||
|
||||
```bash
|
||||
(base) rm -rf src # (this is a precaution in case there is already a src directory)
|
||||
(base) ~/InvokeAI$ conda env create -f environment-cuda.yml
|
||||
(base) ~/InvokeAI$ conda activate invokeai
|
||||
(invokeai) ~/InvokeAI$
|
||||
```
|
||||
|
||||
!!! todo "For systems with an AMD card (using ROCm driver):"
|
||||
|
||||
```bash
|
||||
(base) rm -rf src # (this is a precaution in case there is already a src directory)
|
||||
(base) ~/InvokeAI$ conda env create -f environment-AMD.yml
|
||||
(base) ~/InvokeAI$ conda activate invokeai
|
||||
(invokeai) ~/InvokeAI$
|
||||
```
|
||||
|
||||
After these steps, your command prompt will be prefixed by `(invokeai)` as
|
||||
shown above.
|
@ -13,22 +13,9 @@ one of the steps, so instead of cloning this repo, simply download the notebook
|
||||
from the link above and load it up in VSCode (with the appropriate extensions
|
||||
installed)/Jupyter/JupyterLab and start running the cells one-by-one.
|
||||
|
||||
Note that you will need NVIDIA drivers, Python 3.10, and Git installed
|
||||
beforehand - simplified
|
||||
[step-by-step instructions](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)
|
||||
are available in the wiki (you'll only need steps 1, 2, & 3 ).
|
||||
Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand.
|
||||
|
||||
## **Manual Install**
|
||||
|
||||
### **pip**
|
||||
|
||||
See
|
||||
[Easy-peasy Windows install](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)
|
||||
in the wiki
|
||||
|
||||
---
|
||||
|
||||
### **Conda**
|
||||
## **Manual Install with Conda**
|
||||
|
||||
1. Install Anaconda3 (miniconda3 version) from [here](https://docs.anaconda.com/anaconda/install/windows/)
|
||||
|
||||
@ -52,23 +39,29 @@ in the wiki
cd InvokeAI
```

6. Run the following two commands:
6. Run the following commands:

```batch title="step 6a"
conda env create
```
!!! todo "For systems with a CUDA (Nvidia) card:"

```batch title="step 6b"
conda activate invokeai
```
```bash
rmdir src # (this is a precaution in case there is already a src directory)
conda env create -f environment-cuda.yml
conda activate invokeai
(invokeai)>
```

!!! todo "For systems with an AMD card (using ROCm driver):"

```bash
rmdir src # (this is a precaution in case there is already a src directory)
conda env create -f environment-AMD.yml
conda activate invokeai
(invokeai)>
```

This will install all python requirements and activate the "invokeai" environment
which sets PATH and other environment variables properly.

Note that the long form of the first command is `conda env create -f environment.yml`. If the
environment file isn't specified, conda will default to `environment.yml`. You will need
to provide the `-f` option if you wish to load a different environment file at any point.
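As a quick illustration of that point (the file names below are only examples taken from this page):

```bash
conda env create                          # short form; conda reads ./environment.yml
conda env create -f environment.yml       # equivalent long form
conda env create -f environment-AMD.yml   # explicitly selecting a different file
```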
7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:
|
||||
|
||||
```bash
|
@ -1,65 +0,0 @@
|
||||
name: invokeai
|
||||
channels:
|
||||
- pytorch
|
||||
- conda-forge
|
||||
dependencies:
|
||||
- python=3.9.13
|
||||
- pip=22.2.2
|
||||
|
||||
- pytorch=1.12.1
|
||||
- torchvision=0.13.1
|
||||
|
||||
# I suggest to keep the other deps sorted for convenience.
|
||||
# To determine what the latest versions should be, run:
|
||||
#
|
||||
# ```shell
|
||||
# sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml
|
||||
# CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} '
|
||||
# ```
|
||||
|
||||
- albumentations=1.2.1
|
||||
- coloredlogs=15.0.1
|
||||
- diffusers=0.6.0
|
||||
- einops=0.4.1
|
||||
- grpcio=1.46.4
|
||||
- humanfriendly=10.0
|
||||
- imageio=2.21.2
|
||||
- imageio-ffmpeg=0.4.7
|
||||
- imgaug=0.4.0
|
||||
- kornia=0.6.7
|
||||
- mpmath=1.2.1
|
||||
- nomkl # arm64 has only 1.0 while x64 needs 3.0
|
||||
- numpy=1.23.4
|
||||
- omegaconf=2.1.1
|
||||
- openh264=2.3.0
|
||||
- onnx=1.12.0
|
||||
- onnxruntime=1.12.1
|
||||
- pudb=2022.1
|
||||
- pytorch-lightning=1.7.7
|
||||
- scipy=1.9.3
|
||||
- streamlit=1.12.2
|
||||
- sympy=1.10.1
|
||||
- tensorboard=2.10.0
|
||||
- torchmetrics=0.10.1
|
||||
- py-opencv=4.6.0
|
||||
- flask=2.1.3
|
||||
- flask-socketio=5.3.0
|
||||
- flask-cors=3.0.10
|
||||
- eventlet=0.33.1
|
||||
- protobuf=3.20.1
|
||||
- send2trash=1.8.0
|
||||
- transformers=4.23.1
|
||||
- torch-fidelity=0.3.0
|
||||
- pip:
|
||||
- getpass_asterisk
|
||||
- dependency_injector==4.40.0
|
||||
- realesrgan==0.2.5.0
|
||||
- test-tube==0.7.5
|
||||
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
|
||||
- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
- -e .
|
||||
variables:
|
||||
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
@ -3,43 +3,43 @@ channels:
|
||||
- pytorch
|
||||
- conda-forge
|
||||
dependencies:
|
||||
- python=3.9.*
|
||||
- pip>=22.2.2
|
||||
- albumentations=0.4.3
|
||||
- cudatoolkit
|
||||
- pytorch
|
||||
- torchvision
|
||||
- numpy=1.19
|
||||
- imageio=2.9.0
|
||||
- opencv=4.6.0
|
||||
- pillow=8.*
|
||||
- einops=0.3.0
|
||||
- eventlet
|
||||
- flask-socketio=5.3.0
|
||||
- flask=2.1.*
|
||||
- flask_cors=3.0.10
|
||||
- flask-socketio=5.3.0
|
||||
- send2trash=1.8.0
|
||||
- eventlet
|
||||
- albumentations=0.4.3
|
||||
- pudb=2019.2
|
||||
- imageio-ffmpeg=0.4.2
|
||||
- pytorch-lightning=1.7.7
|
||||
- streamlit
|
||||
- einops=0.3.0
|
||||
- imageio=2.9.0
|
||||
- kornia=0.6
|
||||
- torchmetrics=0.7.0
|
||||
- transformers=4.21.3
|
||||
- torch-fidelity=0.3.0
|
||||
- numpy=1.19
|
||||
- opencv=4.6.0
|
||||
- pillow=8.*
|
||||
- pip>=22.2.2
|
||||
- pudb=2019.2
|
||||
- python=3.9.*
|
||||
- pytorch
|
||||
- pytorch-lightning=1.7.7
|
||||
- send2trash=1.8.0
|
||||
- streamlit
|
||||
- tokenizers>=0.11.1,!=0.11.3,<0.13
|
||||
- torch-fidelity=0.3.0
|
||||
- torchmetrics=0.7.0
|
||||
- torchvision
|
||||
- transformers=4.21.3
|
||||
- pip:
|
||||
- getpass_asterisk
|
||||
- omegaconf==2.1.1
|
||||
- realesrgan==0.2.5.0
|
||||
- test-tube>=0.7.5
|
||||
- pyreadline3
|
||||
- dependency_injector==4.40.0
|
||||
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
|
||||
- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
- getpass_asterisk
|
||||
- gfpgan
|
||||
- omegaconf==2.1.1
|
||||
- pyreadline3
|
||||
- realesrgan
|
||||
- taming-transformers-rom1504
|
||||
- test-tube>=0.7.5
|
||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||
- -e .
|
||||
variables:
|
||||
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
45
environments-and-requirements/environment-lin-amd.yml
Normal file
@ -0,0 +1,45 @@
|
||||
name: invokeai
|
||||
channels:
|
||||
- pytorch
|
||||
- conda-forge
|
||||
- defaults
|
||||
dependencies:
|
||||
- python>=3.9
|
||||
- pip=22.2.2
|
||||
- numpy=1.23.3
|
||||
- pip:
|
||||
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||
- albumentations==0.4.3
|
||||
- dependency_injector==4.40.0
|
||||
- diffusers==0.6.0
|
||||
- einops==0.3.0
|
||||
- eventlet
|
||||
- flask==2.1.3
|
||||
- flask_cors==3.0.10
|
||||
- flask_socketio==5.3.0
|
||||
- getpass_asterisk
|
||||
- gfpgan
|
||||
- imageio-ffmpeg==0.4.2
|
||||
- imageio==2.9.0
|
||||
- kornia==0.6.0
|
||||
- omegaconf==2.2.3
|
||||
- opencv-python==4.5.5.64
|
||||
- pillow==9.2.0
|
||||
- pudb==2019.2
|
||||
- pyreadline3
|
||||
- pytorch-lightning==1.7.7
|
||||
- realesrgan
|
||||
- send2trash==1.8.0
|
||||
- streamlit==1.12.0
|
||||
- taming-transformers-rom1504
|
||||
- test-tube>=0.7.5
|
||||
- torch
|
||||
- torch-fidelity==0.3.0
|
||||
- torchaudio
|
||||
- torchmetrics==0.7.0
|
||||
- torchvision
|
||||
- transformers==4.21.3
|
||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||
- -e .
|
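As an editorial aside (not part of the file above): a plausible way to consume this AMD environment file is to point conda at it directly, although the manual-install docs may instead have you copy it to `environment.yml` at the repository root first:

```bash
conda env create -f environments-and-requirements/environment-lin-amd.yml
conda activate invokeai
```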
@ -13,33 +13,33 @@ dependencies:
|
||||
- cudatoolkit=11.6
|
||||
- pip:
|
||||
- albumentations==0.4.3
|
||||
- opencv-python==4.5.5.64
|
||||
- pudb==2019.2
|
||||
- imageio==2.9.0
|
||||
- imageio-ffmpeg==0.4.2
|
||||
- pytorch-lightning==1.7.7
|
||||
- omegaconf==2.2.3
|
||||
- test-tube>=0.7.5
|
||||
- streamlit==1.12.0
|
||||
- send2trash==1.8.0
|
||||
- pillow==9.2.0
|
||||
- einops==0.3.0
|
||||
- pyreadline3
|
||||
- torch-fidelity==0.3.0
|
||||
- transformers==4.21.3
|
||||
- diffusers==0.6.0
|
||||
- torchmetrics==0.7.0
|
||||
- flask==2.1.3
|
||||
- flask_socketio==5.3.0
|
||||
- flask_cors==3.0.10
|
||||
- dependency_injector==4.40.0
|
||||
- diffusers==0.6.0
|
||||
- einops==0.3.0
|
||||
- eventlet
|
||||
- flask==2.1.3
|
||||
- flask_cors==3.0.10
|
||||
- flask_socketio==5.3.0
|
||||
- getpass_asterisk
|
||||
- gfpgan
|
||||
- imageio-ffmpeg==0.4.2
|
||||
- imageio==2.9.0
|
||||
- kornia==0.6.0
|
||||
- omegaconf==2.2.3
|
||||
- opencv-python==4.5.5.64
|
||||
- pillow==9.2.0
|
||||
- pudb==2019.2
|
||||
- pyreadline3
|
||||
- pytorch-lightning==1.7.7
|
||||
- realesrgan
|
||||
- send2trash==1.8.0
|
||||
- streamlit==1.12.0
|
||||
- taming-transformers-rom1504
|
||||
- test-tube>=0.7.5
|
||||
- torch-fidelity==0.3.0
|
||||
- torchmetrics==0.7.0
|
||||
- transformers==4.21.3
|
||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- -e git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
|
||||
- -e git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
|
||||
- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||
- -e .
|
64
environments-and-requirements/environment-mac.yml
Normal file
@ -0,0 +1,64 @@
|
||||
name: invokeai
|
||||
channels:
|
||||
- pytorch
|
||||
- conda-forge
|
||||
- defaults
|
||||
dependencies:
|
||||
- python=3.10
|
||||
- pip>=22.2
|
||||
- pytorch=1.12
|
||||
- pytorch-lightning=1.7
|
||||
- torchvision=0.13
|
||||
- torchmetrics=0.10
|
||||
- torch-fidelity=0.3
|
||||
|
||||
# I suggest to keep the other deps sorted for convenience.
|
||||
# To determine what the latest versions should be, run:
|
||||
#
|
||||
# ```shell
|
||||
# sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml
|
||||
# CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} '
|
||||
# ```
|
||||
|
||||
- albumentations=1.2
|
||||
- coloredlogs=15.0
|
||||
- diffusers=0.6
|
||||
- einops=0.3
|
||||
- eventlet
|
||||
- grpcio=1.46
|
||||
- flask=2.1
|
||||
- flask-socketio=5.3
|
||||
- flask-cors=3.0
|
||||
- humanfriendly=10.0
|
||||
- imageio=2.21
|
||||
- imageio-ffmpeg=0.4
|
||||
- imgaug=0.4
|
||||
- kornia=0.6
|
||||
- mpmath=1.2
|
||||
- nomkl=3
|
||||
- numpy=1.23
|
||||
- omegaconf=2.1
|
||||
- openh264=2.3
|
||||
- onnx=1.12
|
||||
- onnxruntime=1.12
|
||||
- pudb=2019.2
|
||||
- protobuf=3.20
|
||||
- py-opencv=4.6
|
||||
- scipy=1.9
|
||||
- streamlit=1.12
|
||||
- sympy=1.10
|
||||
- send2trash=1.8
|
||||
- tensorboard=2.10
|
||||
- transformers=4.23
|
||||
- pip:
|
||||
- getpass_asterisk
|
||||
- taming-transformers-rom1504
|
||||
- test-tube==0.7.5
|
||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- git+https://github.com/invoke-ai/k-diffusion.git@mps#egg=k_diffusion
|
||||
- git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
|
||||
- git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
|
||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||
- -e .
|
||||
variables:
|
||||
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
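As an aside (not part of the file): on Apple Silicon this environment is typically created with the conda subdir pinned to the arm64 platform, matching the shape of an invocation that appears in the installer script diff further down:

```bash
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yml
conda activate invokeai
```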
46
environments-and-requirements/environment-win-cuda.yml
Normal file
@ -0,0 +1,46 @@
|
||||
name: invokeai
|
||||
channels:
|
||||
- pytorch
|
||||
- conda-forge
|
||||
- defaults
|
||||
dependencies:
|
||||
- python>=3.9
|
||||
- pip=22.2.2
|
||||
- numpy=1.23.3
|
||||
- torchvision=0.13.1
|
||||
- torchaudio=0.12.1
|
||||
- pytorch=1.12.1
|
||||
- cudatoolkit=11.6
|
||||
- pip:
|
||||
- albumentations==0.4.3
|
||||
- basicsr==1.4.1
|
||||
- dependency_injector==4.40.0
|
||||
- diffusers==0.6.0
|
||||
- einops==0.3.0
|
||||
- eventlet
|
||||
- flask==2.1.3
|
||||
- flask_cors==3.0.10
|
||||
- flask_socketio==5.3.0
|
||||
- getpass_asterisk
|
||||
- gfpgan
|
||||
- imageio-ffmpeg==0.4.2
|
||||
- imageio==2.9.0
|
||||
- kornia==0.6.0
|
||||
- omegaconf==2.2.3
|
||||
- opencv-python==4.5.5.64
|
||||
- pillow==9.2.0
|
||||
- pudb==2019.2
|
||||
- pyreadline3
|
||||
- pytorch-lightning==1.7.7
|
||||
- realesrgan
|
||||
- send2trash==1.8.0
|
||||
- streamlit==1.12.0
|
||||
- taming-transformers-rom1504
|
||||
- test-tube>=0.7.5
|
||||
- torch-fidelity==0.3.0
|
||||
- torchmetrics==0.7.0
|
||||
- transformers==4.21.3
|
||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||
- -e .
|
@ -1,41 +1,36 @@
|
||||
--prefer-binary
|
||||
|
||||
albumentations
|
||||
einops
|
||||
getpass_asterisk
|
||||
huggingface-hub
|
||||
imageio-ffmpeg
|
||||
imageio
|
||||
kornia
|
||||
# pip will resolve the version which matches torch
|
||||
albumentations
|
||||
dependency_injector==4.40.0
|
||||
diffusers
|
||||
einops
|
||||
eventlet
|
||||
flask==2.1.3
|
||||
flask_cors==3.0.10
|
||||
flask_socketio==5.3.0
|
||||
flaskwebgui==0.3.7
|
||||
getpass_asterisk
|
||||
gfpgan
|
||||
huggingface-hub
|
||||
imageio
|
||||
imageio-ffmpeg
|
||||
kornia
|
||||
numpy
|
||||
omegaconf
|
||||
opencv-python
|
||||
pillow
|
||||
pip>=22
|
||||
pudb
|
||||
pytorch-lightning==1.7.7
|
||||
scikit-image>=0.19
|
||||
streamlit
|
||||
pyreadline3
|
||||
# "CompVis/taming-transformers" IS NOT INSTALLABLE
|
||||
# This is a drop-in replacement
|
||||
pytorch-lightning==1.7.7
|
||||
realesrgan
|
||||
scikit-image>=0.19
|
||||
send2trash
|
||||
streamlit
|
||||
taming-transformers-rom1504
|
||||
test-tube
|
||||
torch-fidelity
|
||||
torchmetrics
|
||||
transformers==4.21.*
|
||||
flask==2.1.3
|
||||
flask_socketio==5.3.0
|
||||
flask_cors==3.0.10
|
||||
flaskwebgui==0.3.7
|
||||
send2trash
|
||||
dependency_injector==4.40.0
|
||||
eventlet
|
||||
realesrgan
|
||||
diffusers
|
||||
git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
||||
git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
|
||||
git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
|
||||
-e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
@ -1,4 +1,4 @@
|
||||
-r requirements.txt
|
||||
-r environments-and-requirements/requirements-base.txt
|
||||
|
||||
# Get hardware-appropriate torch/torchvision
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
|
3
environments-and-requirements/requirements-lin-arm64.txt
Normal file
@ -0,0 +1,3 @@
|
||||
--pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
|
||||
-r environments-and-requirements/requirements-base.txt
|
||||
-e .
|
2
environments-and-requirements/requirements-lin-cuda.txt
Normal file
@ -0,0 +1,2 @@
|
||||
-r environments-and-requirements/requirements-base.txt
|
||||
-e .
|
@ -1,4 +1,4 @@
|
||||
-r requirements.txt
|
||||
-r environments-and-requirements/requirements-base.txt
|
||||
|
||||
protobuf==3.19.6
|
||||
torch<1.13.0
|
@ -1,7 +1,8 @@
|
||||
-r requirements.txt
|
||||
-r environments-and-requirements/requirements-base.txt
|
||||
|
||||
# Get hardware-appropriate torch/torchvision
|
||||
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
|
||||
basicsr==1.4.1
|
||||
torch==1.12.1
|
||||
torchvision==0.13.1
|
||||
-e .
|
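For context (an editorial note, not part of the diff): each of these thin per-platform requirements files pulls in `requirements-base.txt`, and they are presumably installed from the repository root with pip, along the lines of:

```bash
# pick the file that matches your platform/GPU; requirements-lin-cuda.txt is just one example
pip install -r environments-and-requirements/requirements-lin-cuda.txt
```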
@ -5,7 +5,7 @@
- `python scripts/dream.py --web` serves both frontend and backend at
http://localhost:9090

## Evironment
## Environment

Install [node](https://nodejs.org/en/download/) (includes npm) and optionally
[yarn](https://yarnpkg.com/getting-started/install).
@ -15,7 +15,7 @@ packages.

## Dev

1. From `frontend/`, run `npm dev` / `yarn dev` to start the dev server.
1. From `frontend/`, run `npm run dev` / `yarn dev` to start the dev server.
2. Run `python scripts/dream.py --web`.
3. Navigate to the dev server address e.g. `http://localhost:5173/`.
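Taken together, the dev loop described above looks roughly like this (an illustrative sketch, not an addition to the README):

```bash
# terminal 1 - frontend dev server
cd frontend && npm run dev          # or: yarn dev
# terminal 2 - backend, from the repository root
python scripts/dream.py --web
# then open the dev server URL, e.g. http://localhost:5173/
```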
BIN
installer/WinLongPathsEnabled.reg
Normal file
@ -1,22 +1,29 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
echo "Be certain that you're in the 'installer' directory before continuing."
|
||||
read -p "Press any key to continue, or CTRL-C to exit..."
|
||||
|
||||
# make the installer zip for linux and mac
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.sh invokeAI
|
||||
cp readme.txt invokeAI
|
||||
rm -rf InvokeAI
|
||||
mkdir -p InvokeAI
|
||||
cp install.sh InvokeAI
|
||||
cp readme.txt InvokeAI
|
||||
|
||||
zip -r invokeAI-linux.zip invokeAI
|
||||
zip -r invokeAI-mac.zip invokeAI
|
||||
zip -r InvokeAI-linux.zip InvokeAI
|
||||
zip -r InvokeAI-mac.zip InvokeAI
|
||||
|
||||
# make the installer zip for windows
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.bat invokeAI
|
||||
cp readme.txt invokeAI
|
||||
rm -rf InvokeAI
|
||||
mkdir -p InvokeAI
|
||||
cp install.bat InvokeAI
|
||||
cp readme.txt InvokeAI
|
||||
cp WinLongPathsEnabled.reg InvokeAI
|
||||
|
||||
zip -r invokeAI-windows.zip invokeAI
|
||||
zip -r InvokeAI-windows.zip InvokeAI
|
||||
|
||||
echo "The installer zips are ready to be distributed.."
|
||||
rm -rf InvokeAI
|
||||
|
||||
echo "The installer zips are ready for distribution."
|
||||
|
@ -1,115 +1,164 @@
|
||||
@echo off
|
||||
|
||||
@rem This script will install git and conda (if not found on the PATH variable)
|
||||
@rem This script will install git (if not found on the PATH variable)
|
||||
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
@rem For users who already have git and conda, this step will be skipped.
|
||||
@rem For users who already have git, this step will be skipped.
|
||||
|
||||
@rem Next, it'll checkout the project's git repo, if necessary.
|
||||
@rem Finally, it'll create the conda environment and preload the models.
|
||||
@rem Next, it'll download the project's source code.
|
||||
@rem Then it will download a self-contained, standalone Python and unpack it.
|
||||
@rem Finally, it'll create the Python virtual environment and preload the models.
|
||||
|
||||
@rem This enables a user to install this project without manually installing conda and git.
|
||||
@rem This enables a user to install this project without manually installing git or Python
|
||||
|
||||
echo "Installing InvokeAI.."
|
||||
echo.
|
||||
echo ***** Installing InvokeAI.. *****
|
||||
|
||||
@rem config
|
||||
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
|
||||
set PATH=c:\windows\system32
|
||||
|
||||
@rem Config
|
||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||
@rem https://mamba.readthedocs.io/en/latest/installation.html
|
||||
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
|
||||
set REPO_URL=https://github.com/invoke-ai/InvokeAI.git
|
||||
set umamba_exists=F
|
||||
@rem Change the download URL to an InvokeAI repo's release URL
|
||||
|
||||
@rem figure out whether git and conda needs to be installed
|
||||
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
||||
set RELEASE_SOURCEBALL=/archive/refs/heads/v2.1.3.tar.gz
|
||||
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
||||
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz
|
||||
|
||||
set PACKAGES_TO_INSTALL=
|
||||
|
||||
call conda --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
|
||||
|
||||
call git --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T
|
||||
@rem Cleanup
|
||||
del /q .tmp1 .tmp2
|
||||
|
||||
@rem (if necessary) install git and conda into a contained environment
|
||||
@rem (if necessary) install git into a contained environment
|
||||
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
||||
@rem download micromamba
|
||||
if "%umamba_exists%" == "F" (
|
||||
echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****
|
||||
|
||||
mkdir "%MAMBA_ROOT_PREFIX%"
|
||||
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe
|
||||
|
||||
@rem test the mamba binary
|
||||
echo Micromamba version:
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
|
||||
)
|
||||
@rem test the mamba binary
|
||||
echo ***** Micromamba version: *****
|
||||
call micromamba.exe --version
|
||||
|
||||
@rem create the installer env
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
|
||||
call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
|
||||
)
|
||||
|
||||
echo "Packages to install:%PACKAGES_TO_INSTALL%"
|
||||
echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
|
||||
call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
|
||||
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
|
||||
echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
)
|
||||
|
||||
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
del /q micromamba.exe
|
||||
|
||||
@rem get the repo (and load into the current directory)
|
||||
if not exist ".git" (
|
||||
call git config --global init.defaultBranch main
|
||||
call git init
|
||||
call git remote add origin %REPO_URL%
|
||||
call git fetch
|
||||
# call git checkout origin/main -ft
|
||||
call git checkout origin/release-candidate-2-1 -ft
|
||||
)
|
||||
@rem For 'git' only
|
||||
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%
|
||||
|
||||
@rem activate the base env
|
||||
call conda activate
|
||||
@rem Download/unpack/clean up InvokeAI release sourceball
|
||||
set err_msg=----- InvokeAI source download failed -----
|
||||
curl -L %RELEASE_URL%/%RELEASE_SOURCEBALL% --output InvokeAI.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
@rem create the environment
|
||||
call conda env remove -n invokeai
|
||||
call conda env create
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "Something went wrong while installing Python libraries and cannot continue.
|
||||
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods."
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
set err_msg=----- InvokeAI source unpack failed -----
|
||||
tar -zxf InvokeAI.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
del /q InvokeAI.tgz
|
||||
|
||||
set err_msg=----- InvokeAI source copy failed -----
|
||||
cd InvokeAI-*
|
||||
xcopy . .. /e /h
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
cd ..
|
||||
|
||||
@rem cleanup
|
||||
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
|
||||
rd /s /q .dev_scripts .github docker-build tests
|
||||
del /q requirements.in requirements-mkdocs.txt shell.nix
|
||||
|
||||
echo ***** Unpacked InvokeAI source *****
|
||||
|
||||
@rem Download/unpack/clean up python-build-standalone
|
||||
set err_msg=----- Python download failed -----
|
||||
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- Python unpack failed -----
|
||||
tar -zxf python.tgz
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
del /q python.tgz
|
||||
|
||||
echo ***** Unpacked python-build-standalone *****
|
||||
|
||||
@rem create venv
|
||||
set err_msg=----- problem creating venv -----
|
||||
.\python\python -E -s -m venv .venv
|
||||
@rem In reality, the following is ALL that 'activate.bat' does,
|
||||
@rem aside from setting the prompt, which we don't care about
|
||||
set PYTHONPATH=
|
||||
set PATH=.venv\Scripts;%PATH%
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
echo ***** Created Python virtual environment *****
|
||||
|
||||
@rem Print venv's Python version
|
||||
set err_msg=----- problem calling venv's python -----
|
||||
echo We're running under
|
||||
.venv\Scripts\python --version
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- pip update failed -----
|
||||
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location --upgrade pip
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
echo ***** Updated pip *****
|
||||
|
||||
set err_msg=----- requirements file copy failed -----
|
||||
copy installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- main pip install failed -----
|
||||
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location -r requirements.txt
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- clipseg install failed -----
|
||||
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
set err_msg=----- InvokeAI setup failed -----
|
||||
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location -e .
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
echo ***** Installed Python dependencies *****
|
||||
|
||||
call conda activate invokeai
|
||||
@rem preload the models
|
||||
call python scripts\preload_models.py
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "The preload_models.py script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. To run preload_models.py again,"
|
||||
echo "run the command 'update.bat' in this directory."
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
call .venv\Scripts\python scripts\preload_models.py
|
||||
set err_msg=----- model download clone failed -----
|
||||
if %errorlevel% neq 0 goto err_exit
|
||||
|
||||
echo ***** Finished downloading models *****
|
||||
|
||||
echo ***** Installing invoke.bat ******
|
||||
copy installer\invoke.bat .\invoke.bat
|
||||
echo All done! Execute the file invoke.bat in this directory to start InvokeAI
|
||||
|
||||
@rem more cleanup
|
||||
rd /s /q installer installer_files
|
||||
|
||||
@rem tell the user their next steps
|
||||
echo ""
|
||||
echo "* InvokeAI installed successfully *"
|
||||
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit 0
|
||||
exit
|
||||
|
||||
:err_exit
|
||||
echo %err_msg%
|
||||
pause
|
||||
exit
|
||||
|
@ -1,130 +1,211 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script will install git and conda (if not found on the PATH variable)
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
function _err_exit {
|
||||
if test "$1" -ne 0
|
||||
then
|
||||
echo -e "Error code $1; Error caught was '$2'"
|
||||
read -p "Press any key to exit..."
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
# This script will install git (if not found on the PATH variable)
|
||||
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
# For users who already have git and conda, this step will be skipped.
|
||||
# For users who already have git, this step will be skipped.
|
||||
|
||||
# Next, it'll checkout the project's git repo, if necessary.
|
||||
# Finally, it'll create the conda environment and preload the models.
|
||||
# Next, it'll download the project's source code.
|
||||
# Then it will download a self-contained, standalone Python and unpack it.
|
||||
# Finally, it'll create the Python virtual environment and preload the models.
|
||||
|
||||
# This enables a user to install this project without manually installing conda and git.
|
||||
# This enables a user to install this project without manually installing git or Python
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
echo -e "\n***** Installing InvokeAI... *****\n"
|
||||
|
||||
echo "Installing InvokeAI.."
|
||||
echo ""
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) OS_NAME="linux";;
|
||||
Darwin*) OS_NAME="mac";;
|
||||
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
|
||||
Darwin*) OS_NAME="darwin";;
|
||||
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or MacOS -----\n" && exit
|
||||
esac
|
||||
|
||||
OS_ARCH=$(uname -m)
|
||||
case "${OS_ARCH}" in
|
||||
x86_64*) OS_ARCH="64";;
|
||||
arm64*) OS_ARCH="arm64";;
|
||||
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
|
||||
x86_64*) ;;
|
||||
arm64*) ;;
|
||||
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
|
||||
esac
|
||||
|
||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
||||
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi
|
||||
MAMBA_OS_NAME=$OS_NAME
|
||||
MAMBA_ARCH=$OS_ARCH
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
MAMBA_OS_NAME="osx"
|
||||
fi
|
||||
|
||||
if [ "$OS_ARCH" == "linux" ]; then
|
||||
MAMBA_ARCH="aarch64"
|
||||
fi
|
||||
|
||||
if [ "$OS_ARCH" == "x86_64" ]; then
|
||||
MAMBA_ARCH="64"
|
||||
fi
|
||||
|
||||
PY_ARCH=$OS_ARCH
|
||||
if [ "$OS_ARCH" == "arm64" ]; then
|
||||
PY_ARCH="aarch64"
|
||||
fi
|
||||
|
||||
# Compute device ('cd' segment of reqs files) detect goes here
|
||||
# This needs a ton of work
|
||||
# Suggestions:
|
||||
# - lspci
|
||||
# - check $PATH for nvidia-smi, gtt CUDA/GPU version from output
|
||||
# - Surely there's a similar utility for AMD?
|
||||
CD="cuda"
|
||||
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
|
||||
CD="mps"
|
||||
fi
|
||||
|
||||
# config
|
||||
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${OS_NAME}-${OS_ARCH}/latest"
|
||||
REPO_URL="https://github.com/invoke-ai/InvokeAI.git"
|
||||
umamba_exists="F"
|
||||
|
||||
# figure out whether git and conda needs to be installed
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
|
||||
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
|
||||
RELEASE_SOURCEBALL=/archive/refs/heads/v2.1.3.tar.gz
|
||||
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
|
||||
if [ "$OS_NAME" == "darwin" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
|
||||
elif [ "$OS_NAME" == "linux" ]; then
|
||||
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
|
||||
fi
|
||||
|
||||
PACKAGES_TO_INSTALL=""
|
||||
|
||||
if ! hash "conda" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
|
||||
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||
|
||||
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
||||
|
||||
# (if necessary) install git and conda into a contained environment
|
||||
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
||||
# download micromamba
|
||||
if [ "$umamba_exists" == "F" ]; then
|
||||
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"
|
||||
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"
|
||||
|
||||
mkdir -p "$MAMBA_ROOT_PREFIX"
|
||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > micromamba
|
||||
|
||||
chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
chmod u+x "micromamba"
|
||||
|
||||
# test the mamba binary
|
||||
echo "Micromamba version:"
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" --version
|
||||
fi
|
||||
# test the mamba binary
|
||||
echo -e "\n***** Micromamba version: *****\n"
|
||||
"micromamba" --version
|
||||
|
||||
# create the installer env
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
|
||||
"micromamba" create -y --prefix "$INSTALL_ENV_DIR"
|
||||
fi
|
||||
|
||||
echo "Packages to install:$PACKAGES_TO_INSTALL"
|
||||
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"
|
||||
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL
|
||||
"micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL
|
||||
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
echo "There was a problem while initializing micromamba. Cannot continue."
|
||||
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
rm -f micromamba.exe
|
||||
|
||||
# get the repo (and load into the current directory)
|
||||
if [ ! -e ".git" ]; then
|
||||
git config --global init.defaultBranch main
|
||||
git init
|
||||
git remote add origin "$REPO_URL"
|
||||
git fetch
|
||||
git checkout origin/release-candidate-2-1 -ft
|
||||
fi
|
||||
export PATH="$INSTALL_ENV_DIR/bin:$PATH"
|
||||
|
||||
# create the environment
|
||||
CONDA_BASEPATH=$(conda info --base)
|
||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||
# Download/unpack/clean up InvokeAI release sourceball
|
||||
_err_msg="\n----- InvokeAI source download failed -----\n"
|
||||
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
|
||||
_err_exit $? _err_msg
|
||||
_err_msg="\n----- InvokeAI source unpack failed -----\n"
|
||||
tar -zxf InvokeAI.tgz
|
||||
_err_exit $? _err_msg
|
||||
|
||||
conda activate
|
||||
rm -f InvokeAI.tgz
|
||||
|
||||
if [ "$OS_NAME" == "mac" ]; then
|
||||
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-${OS_ARCH} conda env create -f environment-mac.yml
|
||||
else
|
||||
conda env remove -n invokeai
|
||||
conda env create -f environment.yml
|
||||
fi
|
||||
_err_msg="\n----- InvokeAI source copy failed -----\n"
|
||||
cd InvokeAI-*
|
||||
cp -r . ..
|
||||
_err_exit $? _err_msg
|
||||
cd ..
|
||||
|
||||
status=$?
|
||||
# cleanup
|
||||
rm -rf InvokeAI-*/
|
||||
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix
|
||||
|
||||
if test $status -ne 0
|
||||
then
|
||||
echo "Something went wrong while installing Python libraries and cannot continue."
|
||||
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods"
|
||||
else
|
||||
conda activate invokeai
|
||||
# preload the models
|
||||
echo "Calling the preload_models.py script"
|
||||
python scripts/preload_models.py
|
||||
status=$?
|
||||
if test $status -ne 0
|
||||
then
|
||||
echo "The preload_models.py script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. Try again by running"
|
||||
echo "update.sh in this directory."
|
||||
else
|
||||
# tell the user their next steps
|
||||
echo "You can now start generating images by running invoke.sh (inside this folder), using ./invoke.sh"
|
||||
fi
|
||||
fi
|
||||
echo -e "\n***** Unpacked InvokeAI source *****\n"
|
||||
|
||||
conda activate invokeai
|
||||
# Download/unpack/clean up python-build-standalone
|
||||
_err_msg="\n----- Python download failed -----\n"
|
||||
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
|
||||
_err_exit $? _err_msg
|
||||
_err_msg="\n----- Python unpack failed -----\n"
|
||||
tar -zxf python.tgz
|
||||
_err_exit $? _err_msg
|
||||
|
||||
rm -f python.tgz
|
||||
|
||||
echo -e "\n***** Unpacked python-build-standalone *****\n"
|
||||
|
||||
# create venv
|
||||
_err_msg="\n----- problem creating venv -----\n"
|
||||
./python/bin/python3 -E -s -m venv .venv
|
||||
_err_exit $? _err_msg
|
||||
# In reality, the following is ALL that 'activate.bat' does,
|
||||
# aside from setting the prompt, which we don't care about
|
||||
export PYTHONPATH=
|
||||
export PATH=.venv/bin:$PATH
|
||||
|
||||
echo -e "\n***** Created Python virtual environment *****\n"
|
||||
|
||||
# Print venv's Python version
|
||||
_err_msg="\n----- problem calling venv's python -----\n"
|
||||
echo -e "We're running under"
|
||||
.venv/bin/python3 --version
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- pip update failed -----\n"
|
||||
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location --upgrade pip
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Updated pip *****\n"
|
||||
|
||||
_err_msg="\n----- requirements file copy failed -----\n"
|
||||
cp installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- main pip install failed -----\n"
|
||||
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location -r requirements.txt
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- clipseg install failed -----\n"
|
||||
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
||||
_err_exit $? _err_msg
|
||||
|
||||
_err_msg="\n----- InvokeAI setup failed -----\n"
|
||||
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location -e .
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Installed Python dependencies *****\n"
|
||||
|
||||
# preload the models
|
||||
.venv/bin/python3 scripts/preload_models.py
|
||||
_err_msg="\n----- model download clone failed -----\n"
|
||||
_err_exit $? _err_msg
|
||||
|
||||
echo -e "\n***** Finished downloading models *****\n"
|
||||
|
||||
echo -e "\n***** Installing invoke.sh ******\n"
|
||||
cp installer/invoke.sh .
|
||||
|
||||
# more cleanup
|
||||
rm -rf installer/ installer_files/
|
||||
|
||||
echo "All done! Run the command './invoke.sh' to start InvokeAI."
|
||||
read -p "Press any key to exit..."
|
||||
exit
|
||||
|
27
installer/invoke.bat
Normal file
@ -0,0 +1,27 @@
|
||||
@echo off
|
||||
|
||||
set PATH=c:\windows\system32
|
||||
set PATH=.venv\Scripts;%PATH%
|
||||
|
||||
echo Do you want to generate images using the
|
||||
echo 1. command-line
|
||||
echo 2. browser-based UI
|
||||
echo 3. open the developer console
|
||||
set /P restore="Please enter 1, 2 or 3: "
|
||||
IF /I "%restore%" == "1" (
|
||||
echo Starting the InvokeAI command-line..
|
||||
.venv\Scripts\python scripts\invoke.py
|
||||
) ELSE IF /I "%restore%" == "2" (
|
||||
echo Starting the InvokeAI browser-based UI..
|
||||
.venv\Scripts\python scripts\invoke.py --web
|
||||
) ELSE IF /I "%restore%" == "3" (
|
||||
echo Developer Console
|
||||
call where python
|
||||
call python --version
|
||||
|
||||
cmd /k
|
||||
) ELSE (
|
||||
echo Invalid selection
|
||||
pause
|
||||
exit /b
|
||||
)
|
24
installer/invoke.sh
Executable file
@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
PATH=.venv/scripts:$PATH
|
||||
|
||||
if [ "$0" != "bash" ]; then
|
||||
echo "Do you want to generate images using the"
|
||||
echo "1. command-line"
|
||||
echo "2. browser-based UI"
|
||||
echo "3. open the developer console"
|
||||
read -p "Please enter 1, 2, or 3: " yn
|
||||
case $yn in
|
||||
1 ) printf "\nStarting the InvokeAI command-line..\n"; .venv/bin/python scripts/invoke.py;;
|
||||
2 ) printf "\nStarting the InvokeAI browser-based UI..\n"; .venv/bin/python scripts/invoke.py --web;;
|
||||
3 ) printf "\nDeveloper Console:\n"; file_name=$(basename "${BASH_SOURCE[0]}"); bash --init-file "$file_name";;
|
||||
* ) echo "Invalid selection"; exit;;
|
||||
esac
|
||||
else # in developer console
|
||||
python --version
|
||||
echo "Press ^D to exit"
|
||||
export PS1="(InvokeAI) \u@\h \w> "
|
||||
fi
|
2069
installer/py3.10-darwin-arm64-mps-reqs.txt
Normal file
2069
installer/py3.10-darwin-x86_64-cpu-reqs.txt
Normal file
2066
installer/py3.10-linux-x86_64-cuda-reqs.txt
Normal file
2072
installer/py3.10-windows-x86_64-cuda-reqs.txt
Normal file
@ -3,9 +3,15 @@ InvokeAI
Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.

Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh' file (on Linux/Mac) to start InvokeAI.
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.
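Spelled out for Linux/Mac users (an illustrative sketch; the readme text above is authoritative):

```bash
cd invokeAI     # the unpacked installer folder
./install.sh    # one-time install
./invoke.sh     # start InvokeAI afterwards
```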
24
installer/requirements.in
Normal file
@ -0,0 +1,24 @@
|
||||
--prefer-binary
|
||||
--extra-index-url https://download.pytorch.org/whl/cu116
|
||||
--trusted-host https://download.pytorch.org
|
||||
albumentations
|
||||
diffusers
|
||||
eventlet
|
||||
flask_cors
|
||||
flask_socketio
|
||||
flaskwebgui
|
||||
getpass_asterisk
|
||||
imageio-ffmpeg
|
||||
pyreadline3
|
||||
realesrgan
|
||||
send2trash
|
||||
streamlit
|
||||
taming-transformers-rom1504
|
||||
test-tube
|
||||
torch-fidelity
|
||||
torchvision==0.13.1 ; platform_system == 'Darwin'
|
||||
torchvision==0.13.1+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
|
||||
transformers
|
||||
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
|
||||
https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip
|
||||
https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip
|
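A note on how this `.in` file relates to the large `py3.10-*-reqs.txt` files added above: those read like pinned lockfiles generated from it, presumably with pip-tools; a plausible (assumed, not documented here) invocation would be:

```bash
pip install pip-tools
pip-compile installer/requirements.in --output-file installer/py3.10-linux-x86_64-cuda-reqs.txt
```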
@ -561,17 +561,15 @@ class Generate:
|
||||
):
|
||||
# retrieve the seed from the image;
|
||||
seed = None
|
||||
image_metadata = None
|
||||
prompt = None
|
||||
|
||||
args = metadata_from_png(image_path)
|
||||
seed = args.seed
|
||||
prompt = args.prompt
|
||||
print(f'>> retrieved seed {seed} and prompt "{prompt}" from {image_path}')
|
||||
|
||||
if not seed:
|
||||
print('* Could not recover seed for image. Replacing with 42. This will not affect image quality')
|
||||
seed = 42
|
||||
args = metadata_from_png(image_path)
|
||||
seed = opt.seed or args.seed
|
||||
if seed is None or seed < 0:
|
||||
seed = random.randrange(0, np.iinfo(np.uint32).max)
|
||||
|
||||
prompt = opt.prompt or args.prompt or ''
|
||||
print(f'>> using seed {seed} and prompt "{prompt}" for {image_path}')
|
||||
|
||||
# try to reuse the same filename prefix as the original file.
|
||||
# we take everything up to the first period
|
||||
@ -618,7 +616,11 @@ class Generate:
|
||||
extend_instructions[direction]=int(pixels)
|
||||
except ValueError:
|
||||
print(f'** invalid extension instruction. Use <directions> <pixels>..., as in "top 64 left 128 right 64 bottom 64"')
|
||||
if len(extend_instructions)>0:
|
||||
|
||||
opt.seed = seed
|
||||
opt.prompt = prompt
|
||||
|
||||
if len(extend_instructions) > 0:
|
||||
restorer = Outcrop(image,self,)
|
||||
return restorer.process (
|
||||
extend_instructions,
|
||||
@ -802,6 +804,10 @@ class Generate:
|
||||
|
||||
# the model cache does the loading and offloading
|
||||
cache = self.model_cache
|
||||
if not cache.valid_model(model_name):
|
||||
print(f'** "{model_name}" is not a known model name. Please check your models.yaml file')
|
||||
return self.model
|
||||
|
||||
cache.print_vram_usage()
|
||||
|
||||
# have to get rid of all references to model in order
|
||||
@ -1029,7 +1035,9 @@ class Generate:
|
||||
return True
|
||||
return False
|
||||
|
||||
def _check_for_erasure(self, image):
|
||||
def _check_for_erasure(self, image:Image.Image)->bool:
|
||||
if image.mode not in ('RGBA','RGB'):
|
||||
return False
|
||||
width, height = image.size
|
||||
pixdata = image.load()
|
||||
colored = 0
|
||||
|
@ -247,8 +247,6 @@ class Args(object):
|
||||
switches.append('--seamless')
|
||||
if a['hires_fix']:
|
||||
switches.append('--hires_fix')
|
||||
if a['strength'] and a['strength']>0:
|
||||
switches.append(f'-f {a["strength"]}')
|
||||
|
||||
# img2img generations have parameters relevant only to them and have special handling
|
||||
if a['init_img'] and len(a['init_img'])>0:
|
||||
@ -554,13 +552,13 @@ class Args(object):
|
||||
postprocessing_group.add_argument(
|
||||
'--gfpgan_model_path',
|
||||
type=str,
|
||||
default='experiments/pretrained_models/GFPGANv1.4.pth',
|
||||
default='./GFPGANv1.4.pth',
|
||||
help='Indicates the path to the GFPGAN model, relative to --gfpgan_dir.',
|
||||
)
|
||||
postprocessing_group.add_argument(
|
||||
'--gfpgan_dir',
|
||||
type=str,
|
||||
default='./src/gfpgan',
|
||||
default='./models/gfpgan',
|
||||
help='Indicates the directory containing the GFPGAN code.',
|
||||
)
|
||||
web_server_group.add_argument(
|
||||
@ -866,6 +864,11 @@ class Args(object):
|
||||
default=32,
|
||||
help='When outpainting, the tile size to use for filling outpaint areas',
|
||||
)
|
||||
postprocessing_group.add_argument(
|
||||
'--new_prompt',
|
||||
type=str,
|
||||
help='Change the text prompt applied during postprocessing (default, use original generation prompt)',
|
||||
)
|
||||
postprocessing_group.add_argument(
|
||||
'-ft',
|
||||
'--facetool',
|
||||
|
@ -47,7 +47,6 @@ def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_n
|
||||
parsed_prompt = pp.parse_conjunction(prompt_string_cleaned).prompts[0]
|
||||
|
||||
parsed_negative_prompt: FlattenedPrompt = pp.parse_conjunction(unconditioned_words).prompts[0]
|
||||
print(f">> Parsed prompt to {parsed_prompt}")
|
||||
|
||||
conditioning = None
|
||||
cac_args:CrossAttentionControl.Arguments = None
|
||||
|
@ -63,7 +63,7 @@ class Generator():
|
||||
**kwargs
|
||||
)
|
||||
results = []
|
||||
seed = seed if seed is not None else self.new_seed()
|
||||
seed = seed if seed is not None and seed >= 0 else self.new_seed()
|
||||
first_seed = seed
|
||||
seed, initial_noise = self.generate_initial_noise(seed, width, height)
|
||||
|
||||
|
@ -169,7 +169,7 @@ class Inpaint(Img2Img):
|
||||
# Fill missing areas of original image
|
||||
init_filled = self.tile_fill_missing(
|
||||
self.pil_image.copy(),
|
||||
seed = self.seed,
|
||||
seed = self.seed if self.seed >= 0 else self.new_seed(),
|
||||
tile_size = tile_size
|
||||
)
|
||||
init_filled.paste(init_image, (0,0), init_image.split()[-1])
|
||||
|
@ -10,8 +10,6 @@ from ldm.models.diffusion.ddim import DDIMSampler
|
||||
from ldm.invoke.generator.omnibus import Omnibus
|
||||
from ldm.models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
|
||||
from PIL import Image
|
||||
from ldm.invoke.devices import choose_autocast
|
||||
from ldm.invoke.image_util import InitImageResizer
|
||||
|
||||
class Txt2Img2Img(Generator):
|
||||
def __init__(self, model, precision):
|
||||
@ -46,13 +44,16 @@ class Txt2Img2Img(Generator):
|
||||
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
|
||||
)
|
||||
|
||||
#x = self.get_noise(init_width, init_height)
|
||||
x = x_T
|
||||
|
||||
if self.free_gpu_mem and self.model.model.device != self.model.device:
|
||||
self.model.model.to(self.model.device)
|
||||
|
||||
samples, _ = sampler.sample(
|
||||
batch_size = 1,
|
||||
S = steps,
|
||||
x_T = x_T,
|
||||
x_T = x,
|
||||
conditioning = c,
|
||||
shape = shape,
|
||||
verbose = False,
|
||||
@ -68,21 +69,11 @@ class Txt2Img2Img(Generator):
|
||||
)
|
||||
|
||||
# resizing
|
||||
|
||||
image = self.sample_to_image(samples)
|
||||
image = InitImageResizer(image).resize(width, height)
|
||||
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
image = image[None].transpose(0, 3, 1, 2)
|
||||
image = torch.from_numpy(image)
|
||||
image = 2.0 * image - 1.0
|
||||
image = image.to(self.model.device)
|
||||
|
||||
scope = choose_autocast(self.precision)
|
||||
with scope(self.model.device.type):
|
||||
samples = self.model.get_first_stage_encoding(
|
||||
self.model.encode_first_stage(image)
|
||||
) # move back to latent space
|
||||
samples = torch.nn.functional.interpolate(
|
||||
samples,
|
||||
size=(height // self.downsampling_factor, width // self.downsampling_factor),
|
||||
mode="bilinear"
|
||||
)
|
||||
|
||||
t_enc = int(strength * steps)
|
||||
ddim_sampler = DDIMSampler(self.model, device=self.model.device)
|
||||
|
@ -41,15 +41,22 @@ class ModelCache(object):
|
||||
self.stack = [] # this is an LRU FIFO
|
||||
self.current_model = None
|
||||
|
||||
def valid_model(self, model_name:str)->bool:
|
||||
'''
|
||||
Given a model name, returns True if it is a valid
|
||||
identifier.
|
||||
'''
|
||||
return model_name in self.config
|
||||
|
||||
def get_model(self, model_name:str):
|
||||
'''
|
||||
Given a model named identified in models.yaml, return
|
||||
the model object. If in RAM will load into GPU VRAM.
|
||||
If on disk, will load from there.
|
||||
'''
|
||||
if model_name not in self.config:
|
||||
if not self.valid_model(model_name):
|
||||
print(f'** "{model_name}" is not a known model name. Please check your models.yaml file')
|
||||
return None
|
||||
return self.current_model
|
||||
|
||||
if self.current_model != model_name:
|
||||
if model_name not in self.models: # make room for a new one
|
||||
@ -102,10 +109,13 @@ class ModelCache(object):
|
||||
Set the default model. The change will not take
|
||||
effect until you call model_cache.commit()
|
||||
'''
|
||||
print(f'DEBUG: before set_default_model()\n{OmegaConf.to_yaml(self.config)}')
|
||||
assert model_name in self.models,f"unknown model '{model_name}'"
|
||||
for model in self.models:
|
||||
self.models[model].pop('default',None)
|
||||
self.models[model_name]['default'] = True
|
||||
config = self.config
|
||||
for model in config:
|
||||
config[model].pop('default',None)
|
||||
config[model_name]['default'] = True
|
||||
print(f'DEBUG: after set_default_model():\n{OmegaConf.to_yaml(self.config)}')
|
||||
|
||||
def list_models(self) -> dict:
|
||||
'''
|
||||
|
@ -636,7 +636,7 @@ def split_weighted_subprompts(text, skip_normalize=False)->list:
|
||||
weight_sum = sum(map(lambda x: x[1], parsed_prompts))
|
||||
if weight_sum == 0:
|
||||
print(
|
||||
"Warning: Subprompt weights add up to zero. Discarding and using even weights instead.")
|
||||
"* Warning: Subprompt weights add up to zero. Discarding and using even weights instead.")
|
||||
equal_weight = 1 / max(len(parsed_prompts), 1)
|
||||
return [(x[0], equal_weight) for x in parsed_prompts]
|
||||
return [(x[0], x[1] / weight_sum) for x in parsed_prompts]
|
||||
|
@ -284,6 +284,7 @@ class Completer(object):
|
||||
switch,partial_path = match.groups()
|
||||
partial_path = partial_path.lstrip()
|
||||
|
||||
|
||||
matches = list()
|
||||
path = os.path.expanduser(partial_path)
|
||||
|
||||
@ -321,6 +322,7 @@ class Completer(object):
|
||||
matches.append(
|
||||
switch+os.path.join(os.path.dirname(full_path), node)
|
||||
)
|
||||
|
||||
return matches
|
||||
|
||||
class DummyCompleter(Completer):
|
||||
|
@ -10,8 +10,9 @@ from PIL import Image
|
||||
class GFPGAN():
|
||||
def __init__(
|
||||
self,
|
||||
gfpgan_dir='src/gfpgan',
|
||||
gfpgan_model_path='experiments/pretrained_models/GFPGANv1.4.pth') -> None:
|
||||
gfpgan_dir='models/gfpgan',
|
||||
gfpgan_model_path='GFPGANv1.4.pth'
|
||||
) -> None:
|
||||
|
||||
self.model_path = os.path.join(gfpgan_dir, gfpgan_model_path)
|
||||
self.gfpgan_model_exists = os.path.isfile(self.model_path)
|
||||
@ -74,6 +75,7 @@ class GFPGAN():
|
||||
image = image.resize(res.size)
|
||||
res = Image.blend(image, res, strength)
|
||||
|
||||
|
||||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
self.gfpgan = None
|
||||
|
@ -28,11 +28,12 @@ class Outcrop(object):
|
||||
self.generate._set_sampler()
|
||||
|
||||
def wrapped_callback(img,seed,**kwargs):
|
||||
image_callback(img,orig_opt.seed,use_prefix=prefix,**kwargs)
|
||||
preferred_seed = orig_opt.seed if orig_opt.seed >= 0 else seed
|
||||
image_callback(img,preferred_seed,use_prefix=prefix,**kwargs)
|
||||
|
||||
result= self.generate.prompt2image(
|
||||
orig_opt.prompt,
|
||||
seed = orig_opt.seed, # uncomment to make it deterministic
|
||||
opt.prompt,
|
||||
seed = opt.seed or orig_opt.seed,
|
||||
sampler = self.generate.sampler,
|
||||
steps = opt.steps,
|
||||
cfg_scale = opt.cfg_scale,
|
||||
|
@ -35,8 +35,8 @@ from PIL import Image, ImageOps
|
||||
from torchvision import transforms
|
||||
|
||||
CLIP_VERSION = 'ViT-B/16'
|
||||
CLIPSEG_WEIGHTS = 'src/clipseg/weights/rd64-uni.pth'
|
||||
CLIPSEG_WEIGHTS_REFINED = 'src/clipseg/weights/rd64-uni-refined.pth'
|
||||
CLIPSEG_WEIGHTS = 'models/clipseg/clipseg_weights/rd64-uni.pth'
|
||||
CLIPSEG_WEIGHTS_REFINED = 'models/clipseg/clipseg_weights/rd64-uni-refined.pth'
|
||||
CLIPSEG_SIZE = 352
|
||||
|
||||
class SegmentedGrayscale(object):
|
||||
|
@ -1,10 +1,13 @@
|
||||
from enum import Enum
|
||||
import enum
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
|
||||
# adapted from bloc97's CrossAttentionControl colab
|
||||
# https://github.com/bloc97/CrossAttentionControl
|
||||
|
||||
|
||||
|
||||
class CrossAttentionControl:
|
||||
|
||||
class Arguments:
|
||||
@ -27,7 +30,14 @@ class CrossAttentionControl:
|
||||
print('warning: cross-attention control options are not working properly for >1 edit')
|
||||
self.edit_options = non_none_edit_options[0]
|
||||
|
||||
|
||||
class Context:
|
||||
|
||||
class Action(enum.Enum):
|
||||
NONE = 0
|
||||
SAVE = 1,
|
||||
APPLY = 2
|
||||
|
||||
def __init__(self, arguments: 'CrossAttentionControl.Arguments', step_count: int):
|
||||
"""
|
||||
:param arguments: Arguments for the cross-attention control process
|
||||
@ -36,14 +46,124 @@ class CrossAttentionControl:
self.arguments = arguments
self.step_count = step_count

self.self_cross_attention_module_identifiers = []
self.tokens_cross_attention_module_identifiers = []

self.saved_cross_attention_maps = {}

self.clear_requests(cleanup=True)

def register_cross_attention_modules(self, model):
for name,module in CrossAttentionControl.get_attention_modules(model,
CrossAttentionControl.CrossAttentionType.SELF):
self.self_cross_attention_module_identifiers.append(name)
for name,module in CrossAttentionControl.get_attention_modules(model,
CrossAttentionControl.CrossAttentionType.TOKENS):
self.tokens_cross_attention_module_identifiers.append(name)

def request_save_attention_maps(self, cross_attention_type: 'CrossAttentionControl.CrossAttentionType'):
if cross_attention_type == CrossAttentionControl.CrossAttentionType.SELF:
self.self_cross_attention_action = CrossAttentionControl.Context.Action.SAVE
else:
self.tokens_cross_attention_action = CrossAttentionControl.Context.Action.SAVE

def request_apply_saved_attention_maps(self, cross_attention_type: 'CrossAttentionControl.CrossAttentionType'):
if cross_attention_type == CrossAttentionControl.CrossAttentionType.SELF:
self.self_cross_attention_action = CrossAttentionControl.Context.Action.APPLY
else:
self.tokens_cross_attention_action = CrossAttentionControl.Context.Action.APPLY

def is_tokens_cross_attention(self, module_identifier) -> bool:
return module_identifier in self.tokens_cross_attention_module_identifiers

def get_should_save_maps(self, module_identifier: str) -> bool:
if module_identifier in self.self_cross_attention_module_identifiers:
return self.self_cross_attention_action == CrossAttentionControl.Context.Action.SAVE
elif module_identifier in self.tokens_cross_attention_module_identifiers:
return self.tokens_cross_attention_action == CrossAttentionControl.Context.Action.SAVE
return False

def get_should_apply_saved_maps(self, module_identifier: str) -> bool:
if module_identifier in self.self_cross_attention_module_identifiers:
return self.self_cross_attention_action == CrossAttentionControl.Context.Action.APPLY
elif module_identifier in self.tokens_cross_attention_module_identifiers:
return self.tokens_cross_attention_action == CrossAttentionControl.Context.Action.APPLY
return False

def get_active_cross_attention_control_types_for_step(self, percent_through:float=None)\
-> list['CrossAttentionControl.CrossAttentionType']:
"""
Should cross-attention control be applied on the given step?
:param percent_through: How far through the step sequence are we (0.0=pure noise, 1.0=completely denoised image). Expected range 0.0..<1.0.
:return: A list of attention types that cross-attention control should be performed for on the given step. May be [].
"""
if percent_through is None:
return [CrossAttentionControl.CrossAttentionType.SELF, CrossAttentionControl.CrossAttentionType.TOKENS]

opts = self.arguments.edit_options
to_control = []
if opts['s_start'] <= percent_through and percent_through < opts['s_end']:
to_control.append(CrossAttentionControl.CrossAttentionType.SELF)
if opts['t_start'] <= percent_through and percent_through < opts['t_end']:
to_control.append(CrossAttentionControl.CrossAttentionType.TOKENS)
return to_control
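For illustration, a minimal sketch of how these thresholds gate the two attention types at a given step; the edit-option values below are assumptions, not defaults from this change:

# hypothetical options: self-attention control only for the first 30% of sampling,
# token cross-attention control for the whole run
edit_options = {'s_start': 0.0, 's_end': 0.3, 't_start': 0.0, 't_end': 1.0}
percent_through = 0.5
active = []
if edit_options['s_start'] <= percent_through < edit_options['s_end']:
    active.append('SELF')
if edit_options['t_start'] <= percent_through < edit_options['t_end']:
    active.append('TOKENS')
# at 50% through sampling only TOKENS control remains active here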
def save_slice(self, identifier: str, slice: torch.Tensor, dim: Optional[int], offset: int,
slice_size: Optional[int]):
if identifier not in self.saved_cross_attention_maps:
self.saved_cross_attention_maps[identifier] = {
'dim': dim,
'slice_size': slice_size,
'slices': {offset or 0: slice}
}
else:
self.saved_cross_attention_maps[identifier]['slices'][offset or 0] = slice

def get_slice(self, identifier: str, requested_dim: Optional[int], requested_offset: int, slice_size: int):
saved_attention_dict = self.saved_cross_attention_maps[identifier]
if requested_dim is None:
if saved_attention_dict['dim'] is not None:
raise RuntimeError(f"dim mismatch: expected dim=None, have {saved_attention_dict['dim']}")
return saved_attention_dict['slices'][0]

if saved_attention_dict['dim'] == requested_dim:
if slice_size != saved_attention_dict['slice_size']:
raise RuntimeError(
f"slice_size mismatch: expected slice_size={slice_size}, have {saved_attention_dict['slice_size']}")
return saved_attention_dict['slices'][requested_offset]

if saved_attention_dict['dim'] == None:
whole_saved_attention = saved_attention_dict['slices'][0]
if requested_dim == 0:
return whole_saved_attention[requested_offset:requested_offset + slice_size]
elif requested_dim == 1:
return whole_saved_attention[:, requested_offset:requested_offset + slice_size]

raise RuntimeError(f"Cannot convert dim {saved_attention_dict['dim']} to requested dim {requested_dim}")

def get_slicing_strategy(self, identifier: str) -> Optional[tuple[int, int]]:
saved_attention = self.saved_cross_attention_maps.get(identifier, None)
if saved_attention is None:
return None, None
return saved_attention['dim'], saved_attention['slice_size']
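A hypothetical round trip through save_slice and get_slice; the identifier, tensor shape and the `arguments` object below are illustrative assumptions, not values from this change:

import torch
from ldm.models.diffusion.cross_attention_control import CrossAttentionControl

ctx = CrossAttentionControl.Context(arguments, step_count=30)  # 'arguments' assumed to be a prepared Arguments instance
full_map = torch.rand(8, 4096, 77)
ctx.save_slice('attn2_example', full_map, dim=None, offset=0, slice_size=None)
# a dimension-0 slice can later be carved out of the un-sliced saved map
chunk = ctx.get_slice('attn2_example', requested_dim=0, requested_offset=0, slice_size=4)
# chunk equals full_map[0:4]; get_slicing_strategy('attn2_example') returns (None, None) here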
def clear_requests(self, cleanup=True):
self.tokens_cross_attention_action = CrossAttentionControl.Context.Action.NONE
self.self_cross_attention_action = CrossAttentionControl.Context.Action.NONE
if cleanup:
self.saved_cross_attention_maps = {}

def offload_saved_attention_slices_to_cpu(self):
for key, map_dict in self.saved_cross_attention_maps.items():
for offset, slice in map_dict['slices'].items():
map_dict[offset] = slice.to('cpu')

@classmethod
def remove_cross_attention_control(cls, model):
cls.remove_attention_function(model)
@classmethod
def setup_cross_attention_control(cls, model,
cross_attention_control_args: Arguments
):
def setup_cross_attention_control(cls, model, context: Context):
"""
Inject attention parameters and functions into the passed in model to enable cross attention editing.

@ -53,7 +173,7 @@ class CrossAttentionControl:
"""

# adapted from init_attention_edit
device = cross_attention_control_args.edited_conditioning.device
device = context.arguments.edited_conditioning.device

# urgh. should this be hardcoded?
max_length = 77
@ -61,141 +181,82 @@ class CrossAttentionControl:
mask = torch.zeros(max_length)
indices_target = torch.arange(max_length, dtype=torch.long)
indices = torch.zeros(max_length, dtype=torch.long)
for name, a0, a1, b0, b1 in cross_attention_control_args.edit_opcodes:
for name, a0, a1, b0, b1 in context.arguments.edit_opcodes:
if b0 < max_length:
if name == "equal":# or (name == "replace" and a1 - a0 == b1 - b0):
# these tokens have not been edited
indices[b0:b1] = indices_target[a0:a1]
mask[b0:b1] = 1

cls.inject_attention_function(model)

for m in cls.get_attention_modules(model, cls.CrossAttentionType.SELF):
m.last_attn_slice_mask = None
m.last_attn_slice_indices = None

for m in cls.get_attention_modules(model, cls.CrossAttentionType.TOKENS):
m.last_attn_slice_mask = mask.to(device)
m.last_attn_slice_indices = indices.to(device)
context.register_cross_attention_modules(model)
context.cross_attention_mask = mask.to(device)
context.cross_attention_index_map = indices.to(device)
cls.inject_attention_function(model, context)
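A worked example of the mask and index-map construction above, using a hypothetical opcode list rather than anything from this change:

# assumed opcodes: tokens 0-2 unchanged, tokens 3-4 replaced by three new tokens, tokens 5-9 unchanged but shifted
edit_opcodes = [('equal', 0, 3, 0, 3), ('replace', 3, 5, 3, 6), ('equal', 5, 10, 6, 11)]
# after the loop: mask[0:3] = 1 and mask[6:11] = 1, every other position stays 0;
# indices[0:3] = [0, 1, 2] and indices[6:11] = [5, 6, 7, 8, 9], so unedited target tokens
# are remapped back to the positions they held in the original prompt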
class CrossAttentionType(Enum):
|
||||
class CrossAttentionType(enum.Enum):
|
||||
SELF = 1
|
||||
TOKENS = 2
|
||||
|
||||
@classmethod
|
||||
def get_active_cross_attention_control_types_for_step(cls, context: 'CrossAttentionControl.Context', percent_through:float=None)\
|
||||
-> list['CrossAttentionControl.CrossAttentionType']:
|
||||
"""
|
||||
Should cross-attention control be applied on the given step?
|
||||
:param percent_through: How far through the step sequence are we (0.0=pure noise, 1.0=completely denoised image). Expected range 0.0..<1.0.
|
||||
:return: A list of attention types that cross-attention control should be performed for on the given step. May be [].
|
||||
"""
|
||||
if percent_through is None:
|
||||
return [cls.CrossAttentionType.SELF, cls.CrossAttentionType.TOKENS]
|
||||
|
||||
opts = context.arguments.edit_options
|
||||
to_control = []
|
||||
if opts['s_start'] <= percent_through and percent_through < opts['s_end']:
|
||||
to_control.append(cls.CrossAttentionType.SELF)
|
||||
if opts['t_start'] <= percent_through and percent_through < opts['t_end']:
|
||||
to_control.append(cls.CrossAttentionType.TOKENS)
|
||||
return to_control
|
||||
|
||||
|
||||
@classmethod
|
||||
def get_attention_modules(cls, model, which: CrossAttentionType):
|
||||
which_attn = "attn1" if which is cls.CrossAttentionType.SELF else "attn2"
|
||||
return [module for name, module in model.named_modules() if
|
||||
return [(name,module) for name, module in model.named_modules() if
|
||||
type(module).__name__ == "CrossAttention" and which_attn in name]
|
||||
|
||||
@classmethod
|
||||
def clear_requests(cls, model, clear_attn_slice=True):
|
||||
self_attention_modules = cls.get_attention_modules(model, cls.CrossAttentionType.SELF)
|
||||
tokens_attention_modules = cls.get_attention_modules(model, cls.CrossAttentionType.TOKENS)
|
||||
for m in self_attention_modules+tokens_attention_modules:
|
||||
m.save_last_attn_slice = False
|
||||
m.use_last_attn_slice = False
|
||||
if clear_attn_slice:
|
||||
m.last_attn_slice = None
|
||||
|
||||
@classmethod
|
||||
def request_save_attention_maps(cls, model, cross_attention_type: CrossAttentionType):
|
||||
modules = cls.get_attention_modules(model, cross_attention_type)
|
||||
for m in modules:
|
||||
# clear out the saved slice in case the outermost dim changes
|
||||
m.last_attn_slice = None
|
||||
m.save_last_attn_slice = True
|
||||
|
||||
@classmethod
|
||||
def request_apply_saved_attention_maps(cls, model, cross_attention_type: CrossAttentionType):
|
||||
modules = cls.get_attention_modules(model, cross_attention_type)
|
||||
for m in modules:
|
||||
m.use_last_attn_slice = True
|
||||
|
||||
|
||||
|
||||
@classmethod
|
||||
def inject_attention_function(cls, unet):
|
||||
def inject_attention_function(cls, unet, context: 'CrossAttentionControl.Context'):
|
||||
# ORIGINAL SOURCE CODE: https://github.com/huggingface/diffusers/blob/91ddd2a25b848df0fa1262d4f1cd98c7ccb87750/src/diffusers/models/attention.py#L276
|
||||
|
||||
def attention_slice_wrangler(self, attention_scores, suggested_attention_slice, dim, offset, slice_size):
|
||||
def attention_slice_wrangler(module, suggested_attention_slice:torch.Tensor, dim, offset, slice_size):
|
||||
|
||||
#print("in wrangler with suggested_attention_slice shape", suggested_attention_slice.shape, "dim", dim)
|
||||
#memory_usage = suggested_attention_slice.element_size() * suggested_attention_slice.nelement()
|
||||
|
||||
attn_slice = suggested_attention_slice
|
||||
if dim is not None:
|
||||
start = offset
|
||||
end = start+slice_size
|
||||
#print(f"in wrangler, sliced dim {dim} {start}-{end}, use_last_attn_slice is {self.use_last_attn_slice}, save_last_attn_slice is {self.save_last_attn_slice}")
|
||||
#else:
|
||||
# print(f"in wrangler, whole, use_last_attn_slice is {self.use_last_attn_slice}, save_last_attn_slice is {self.save_last_attn_slice}")
|
||||
attention_slice = suggested_attention_slice
|
||||
|
||||
if self.use_last_attn_slice:
|
||||
if dim is None:
|
||||
last_attn_slice = self.last_attn_slice
|
||||
# print("took whole slice of shape", attn_slice.shape, "from complete shape", self.last_attn_slice.shape)
|
||||
if context.get_should_save_maps(module.identifier):
|
||||
#print(module.identifier, "saving suggested_attention_slice of shape",
|
||||
# suggested_attention_slice.shape, "dim", dim, "offset", offset)
|
||||
slice_to_save = attention_slice.to('cpu') if dim is not None else attention_slice
|
||||
context.save_slice(module.identifier, slice_to_save, dim=dim, offset=offset, slice_size=slice_size)
|
||||
elif context.get_should_apply_saved_maps(module.identifier):
|
||||
#print(module.identifier, "applying saved attention slice for dim", dim, "offset", offset)
|
||||
saved_attention_slice = context.get_slice(module.identifier, dim, offset, slice_size)
|
||||
|
||||
# slice may have been offloaded to CPU
|
||||
saved_attention_slice = saved_attention_slice.to(suggested_attention_slice.device)
|
||||
|
||||
if context.is_tokens_cross_attention(module.identifier):
|
||||
index_map = context.cross_attention_index_map
|
||||
remapped_saved_attention_slice = torch.index_select(saved_attention_slice, -1, index_map)
|
||||
this_attention_slice = suggested_attention_slice
|
||||
|
||||
mask = context.cross_attention_mask
|
||||
saved_mask = mask
|
||||
this_mask = 1 - mask
|
||||
attention_slice = remapped_saved_attention_slice * saved_mask + \
|
||||
this_attention_slice * this_mask
|
||||
else:
|
||||
last_attn_slice = self.last_attn_slice[offset]
|
||||
|
||||
if self.last_attn_slice_mask is None:
|
||||
# just use everything
|
||||
attn_slice = last_attn_slice
|
||||
else:
|
||||
last_attn_slice_mask = self.last_attn_slice_mask
|
||||
remapped_last_attn_slice = torch.index_select(last_attn_slice, -1, self.last_attn_slice_indices)
|
||||
attention_slice = saved_attention_slice
|
||||
|
||||
this_attn_slice = attn_slice
|
||||
this_attn_slice_mask = 1 - last_attn_slice_mask
|
||||
attn_slice = this_attn_slice * this_attn_slice_mask + \
|
||||
remapped_last_attn_slice * last_attn_slice_mask
|
||||
|
||||
if self.save_last_attn_slice:
|
||||
if dim is None:
|
||||
self.last_attn_slice = attn_slice
|
||||
else:
|
||||
if self.last_attn_slice is None:
|
||||
self.last_attn_slice = { offset: attn_slice }
|
||||
else:
|
||||
self.last_attn_slice[offset] = attn_slice
|
||||
|
||||
return attn_slice
|
||||
return attention_slice
|
||||
|
||||
for name, module in unet.named_modules():
|
||||
module_name = type(module).__name__
|
||||
if module_name == "CrossAttention":
|
||||
module.last_attn_slice = None
|
||||
module.last_attn_slice_indices = None
|
||||
module.last_attn_slice_mask = None
|
||||
module.use_last_attn_weights = False
|
||||
module.use_last_attn_slice = False
|
||||
module.save_last_attn_slice = False
|
||||
module.identifier = name
|
||||
module.set_attention_slice_wrangler(attention_slice_wrangler)
|
||||
module.set_slicing_strategy_getter(lambda module, module_identifier=name: \
|
||||
context.get_slicing_strategy(module_identifier))
|
||||
|
||||
@classmethod
|
||||
def remove_attention_function(cls, unet):
|
||||
# clear wrangler callback
|
||||
for name, module in unet.named_modules():
|
||||
module_name = type(module).__name__
|
||||
if module_name == "CrossAttention":
|
||||
module.set_attention_slice_wrangler(None)
|
||||
module.set_slicing_strategy_getter(None)
|
||||
|
||||
|
@ -1,9 +1,11 @@
|
||||
import traceback
|
||||
from math import ceil
|
||||
from typing import Callable, Optional, Union
|
||||
|
||||
import torch
|
||||
|
||||
from ldm.models.diffusion.cross_attention_control import CrossAttentionControl
|
||||
from ldm.modules.attention import get_mem_free_total
|
||||
|
||||
|
||||
class InvokeAIDiffuserComponent:
|
||||
@ -34,7 +36,7 @@ class InvokeAIDiffuserComponent:
|
||||
"""
|
||||
self.model = model
|
||||
self.model_forward_callback = model_forward_callback
|
||||
|
||||
self.cross_attention_control_context = None
|
||||
|
||||
def setup_cross_attention_control(self, conditioning: ExtraConditioningInfo, step_count: int):
|
||||
self.conditioning = conditioning
|
||||
@ -42,11 +44,7 @@ class InvokeAIDiffuserComponent:
|
||||
arguments=self.conditioning.cross_attention_control_args,
|
||||
step_count=step_count
|
||||
)
|
||||
CrossAttentionControl.setup_cross_attention_control(self.model,
|
||||
cross_attention_control_args=self.conditioning.cross_attention_control_args
|
||||
)
|
||||
#todo: refactor edited_conditioning, edit_opcodes, edit_options into a struct
|
||||
#todo: apply edit_options using step_count
|
||||
CrossAttentionControl.setup_cross_attention_control(self.model, self.cross_attention_control_context)
|
||||
|
||||
def remove_cross_attention_control(self):
|
||||
self.conditioning = None
|
||||
@ -54,6 +52,7 @@ class InvokeAIDiffuserComponent:
|
||||
CrossAttentionControl.remove_cross_attention_control(self.model)
|
||||
|
||||
|
||||
|
||||
def do_diffusion_step(self, x: torch.Tensor, sigma: torch.Tensor,
|
||||
unconditioning: Union[torch.Tensor,dict],
|
||||
conditioning: Union[torch.Tensor,dict],
|
||||
@ -70,12 +69,12 @@ class InvokeAIDiffuserComponent:
|
||||
:return: the new latents after applying the model to x using unscaled unconditioning and CFG-scaled conditioning.
|
||||
"""
|
||||
|
||||
CrossAttentionControl.clear_requests(self.model)
|
||||
|
||||
cross_attention_control_types_to_do = []
|
||||
context: CrossAttentionControl.Context = self.cross_attention_control_context
|
||||
if self.cross_attention_control_context is not None:
|
||||
percent_through = self.estimate_percent_through(step_index, sigma)
|
||||
cross_attention_control_types_to_do = CrossAttentionControl.get_active_cross_attention_control_types_for_step(self.cross_attention_control_context, percent_through)
|
||||
cross_attention_control_types_to_do = context.get_active_cross_attention_control_types_for_step(percent_through)
|
||||
|
||||
wants_cross_attention_control = (len(cross_attention_control_types_to_do) > 0)
|
||||
wants_hybrid_conditioning = isinstance(conditioning, dict)
|
||||
@ -124,7 +123,7 @@ class InvokeAIDiffuserComponent:
|
||||
return unconditioned_next_x, conditioned_next_x
|
||||
|
||||
|
||||
def apply_cross_attention_controlled_conditioning(self, x, sigma, unconditioning, conditioning, cross_attention_control_types_to_do):
|
||||
def apply_cross_attention_controlled_conditioning(self, x:torch.Tensor, sigma, unconditioning, conditioning, cross_attention_control_types_to_do):
|
||||
# print('pct', percent_through, ': doing cross attention control on', cross_attention_control_types_to_do)
|
||||
# slower non-batched path (20% slower on mac MPS)
|
||||
# We are only interested in using attention maps for conditioned_next_x, but batching them with generation of
|
||||
@ -134,32 +133,32 @@ class InvokeAIDiffuserComponent:
|
||||
# representing batched uncond + cond, but then when it comes to applying the saved attention, the
|
||||
# wrangler gets an attention tensor which only has shape[0]=8, representing just self.edited_conditionings.)
|
||||
# todo: give CrossAttentionControl's `wrangler` function more info so it can work with a batched call as well.
|
||||
context:CrossAttentionControl.Context = self.cross_attention_control_context
|
||||
|
||||
try:
|
||||
unconditioned_next_x = self.model_forward_callback(x, sigma, unconditioning)
|
||||
|
||||
# process x using the original prompt, saving the attention maps
|
||||
for type in cross_attention_control_types_to_do:
|
||||
CrossAttentionControl.request_save_attention_maps(self.model, type)
|
||||
#print("saving attention maps for", cross_attention_control_types_to_do)
|
||||
for ca_type in cross_attention_control_types_to_do:
|
||||
context.request_save_attention_maps(ca_type)
|
||||
_ = self.model_forward_callback(x, sigma, conditioning)
|
||||
CrossAttentionControl.clear_requests(self.model, clear_attn_slice=False)
|
||||
context.clear_requests(cleanup=False)
|
||||
|
||||
# process x again, using the saved attention maps to control where self.edited_conditioning will be applied
|
||||
for type in cross_attention_control_types_to_do:
|
||||
CrossAttentionControl.request_apply_saved_attention_maps(self.model, type)
|
||||
#print("applying saved attention maps for", cross_attention_control_types_to_do)
|
||||
for ca_type in cross_attention_control_types_to_do:
|
||||
context.request_apply_saved_attention_maps(ca_type)
|
||||
edited_conditioning = self.conditioning.cross_attention_control_args.edited_conditioning
|
||||
conditioned_next_x = self.model_forward_callback(x, sigma, edited_conditioning)
|
||||
context.clear_requests(cleanup=True)
|
||||
|
||||
CrossAttentionControl.clear_requests(self.model)
|
||||
|
||||
return unconditioned_next_x, conditioned_next_x
|
||||
|
||||
except RuntimeError:
|
||||
# make sure we clean out the attention slices we're storing on the model
|
||||
# TODO don't store things on the model
|
||||
CrossAttentionControl.clear_requests(self.model)
|
||||
except:
|
||||
context.clear_requests(cleanup=True)
|
||||
raise
|
||||
|
||||
return unconditioned_next_x, conditioned_next_x
|
||||
|
||||
def estimate_percent_through(self, step_index, sigma):
|
||||
if step_index is not None and self.cross_attention_control_context is not None:
|
||||
# percent_through will never reach 1.0 (but this is intended)
|
||||
|
@ -1,6 +1,6 @@
from inspect import isfunction
import math
from typing import Callable
from typing import Callable, Optional

import torch
import torch.nn.functional as F
@ -151,6 +151,17 @@ class SpatialSelfAttention(nn.Module):

return x+h_

def get_mem_free_total(device):
#only on cuda
if not torch.cuda.is_available():
return None
stats = torch.cuda.memory_stats(device)
mem_active = stats['active_bytes.all.current']
mem_reserved = stats['reserved_bytes.all.current']
mem_free_cuda, _ = torch.cuda.mem_get_info(device)
mem_free_torch = mem_reserved - mem_active
mem_free_total = mem_free_cuda + mem_free_torch
return mem_free_total


class CrossAttention(nn.Module):
@ -173,31 +184,43 @@ class CrossAttention(nn.Module):

self.mem_total_gb = psutil.virtual_memory().total // (1 << 30)

self.cached_mem_free_total = None
self.attention_slice_wrangler = None
self.slicing_strategy_getter = None

def set_attention_slice_wrangler(self, wrangler:Callable[[nn.Module, torch.Tensor, torch.Tensor, int, int, int], torch.Tensor]):
def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, torch.Tensor, int, int, int], torch.Tensor]]):
'''
Set custom attention calculator to be called when attention is calculated
:param wrangler: Callback, with args (self, attention_scores, suggested_attention_slice, dim, offset, slice_size),
:param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
which returns either the suggested_attention_slice or an adjusted equivalent.
self is the current CrossAttention module for which the callback is being invoked.
attention_scores are the scores for attention
suggested_attention_slice is a softmax(dim=-1) over attention_scores
dim is -1 if the call is non-sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
If dim is >= 0, offset and slice_size specify the slice start and length.
`module` is the current CrossAttention module for which the callback is being invoked.
`suggested_attention_slice` is the default-calculated attention slice
`dim` is -1 if the attenion map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.

Pass None to use the default attention calculation.
:return:
'''
self.attention_slice_wrangler = wrangler

def set_slicing_strategy_getter(self, getter: Optional[Callable[[nn.Module], tuple[int,int]]]):
self.slicing_strategy_getter = getter
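A minimal sketch of a wrangler callback matching the signature documented above; the pass-through behaviour is purely illustrative and is not something this change installs:

def passthrough_wrangler(module, suggested_attention_slice, dim, offset, slice_size):
    # inspect or adjust the slice here; returning it unchanged reproduces the default attention
    return suggested_attention_slice

# cross_attention_module is assumed to be an existing CrossAttention instance
cross_attention_module.set_attention_slice_wrangler(passthrough_wrangler)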
def cache_free_memory_count(self, device):
self.cached_mem_free_total = get_mem_free_total(device)
print("free cuda memory: ", self.cached_mem_free_total)

def clear_cached_free_memory_count(self):
self.cached_mem_free_total = None

def einsum_lowest_level(self, q, k, v, dim, offset, slice_size):
# calculate attention scores
attention_scores = einsum('b i d, b j d -> b i j', q, k)
# calculate attenion slice by taking the best scores for each latent pixel
# calculate attention slice by taking the best scores for each latent pixel
default_attention_slice = attention_scores.softmax(dim=-1, dtype=attention_scores.dtype)
if self.attention_slice_wrangler is not None:
attention_slice = self.attention_slice_wrangler(self, attention_scores, default_attention_slice, dim, offset, slice_size)
attention_slice_wrangler = self.attention_slice_wrangler
if attention_slice_wrangler is not None:
attention_slice = attention_slice_wrangler(self, default_attention_slice, dim, offset, slice_size)
else:
attention_slice = default_attention_slice
@ -240,17 +263,26 @@ class CrossAttention(nn.Module):
return self.einsum_op_slice_dim1(q, k, v, max(q.shape[1] // div, 1))

def einsum_op_cuda(self, q, k, v):
stats = torch.cuda.memory_stats(q.device)
mem_active = stats['active_bytes.all.current']
mem_reserved = stats['reserved_bytes.all.current']
mem_free_cuda, _ = torch.cuda.mem_get_info(q.device)
mem_free_torch = mem_reserved - mem_active
mem_free_total = mem_free_cuda + mem_free_torch
# check if we already have a slicing strategy (this should only happen during cross-attention controlled generation)
slicing_strategy_getter = self.slicing_strategy_getter
if slicing_strategy_getter is not None:
(dim, slice_size) = slicing_strategy_getter(self)
if dim is not None:
# print("using saved slicing strategy with dim", dim, "slice size", slice_size)
if dim == 0:
return self.einsum_op_slice_dim0(q, k, v, slice_size)
elif dim == 1:
return self.einsum_op_slice_dim1(q, k, v, slice_size)

# fallback for when there is no saved strategy, or saved strategy does not slice
mem_free_total = self.cached_mem_free_total or get_mem_free_total(q.device)
# Divide factor of safety as there's copying and fragmentation
return self.einsum_op_tensor_mem(q, k, v, mem_free_total / 3.3 / (1 << 20))
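To make the final expression concrete: dividing by 1 << 20 converts bytes to MiB, and 3.3 is the safety factor named in the comment, so with (for example) roughly 8 GiB reported free the tensor-memory budget passed on is about 8192 / 3.3 ≈ 2482 MiB. The 8 GiB figure is illustrative only.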
def get_attention_mem_efficient(self, q, k, v):
if q.device.type == 'cuda':
#print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device))
return self.einsum_op_cuda(q, k, v)

if q.device.type == 'mps':
@ -65,10 +65,8 @@ def make_ddim_timesteps(
if ddim_discr_method == 'uniform':
c = num_ddpm_timesteps // num_ddim_timesteps
if c < 1:
c = 1

# remove 1 final step to prevent index out of bound error
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))[:-1]
c = 1
ddim_timesteps = (np.arange(0, num_ddim_timesteps) * c).astype(int)
elif ddim_discr_method == 'quad':
ddim_timesteps = (
(
@ -86,6 +84,7 @@ def make_ddim_timesteps(
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
# add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1
# steps_out = ddim_timesteps

if verbose:
print(f'Selected timesteps for ddim sampler: {steps_out}')
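As a worked example of the arithmetic above (illustrative numbers, assuming c = num_ddpm_timesteps // num_ddim_timesteps = 20 for 1000 DDPM and 50 DDIM steps): np.arange(0, 50) * 20 yields [0, 20, ..., 980], and steps_out = ddim_timesteps + 1 shifts every entry to [1, 21, ..., 981] so the final alpha values line up.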
@ -1,3 +0,0 @@
[tool.blue]
line-length = 90
target-version = ['py310']
@ -1,27 +0,0 @@
albumentations==0.4.3
einops==0.3.0
diffusers==0.6.0
huggingface-hub==0.8.1
imageio==2.9.0
imageio-ffmpeg==0.4.2
kornia==0.6.0
numpy==1.23.1
--pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
omegaconf==2.1.1
opencv-python==4.6.0.66
pillow==9.2.0
pudb==2019.2
torch==1.12.1
torchvision==0.13.0
pytorch-lightning==1.7.7
streamlit==1.12.0
test-tube>=0.7.5
torch-fidelity==0.3.0
torchmetrics==0.6.0
transformers==4.21.3
-e git+https://github.com/openai/CLIP.git@main#egg=clip
-e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
-e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
-e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
-e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
-e .
@ -29,6 +29,7 @@ infile = None
|
||||
def main():
|
||||
"""Initialize command-line parsers and the diffusion model"""
|
||||
global infile
|
||||
print('* Initializing, be patient...')
|
||||
|
||||
opt = Args()
|
||||
args = opt.parse_args()
|
||||
@ -46,7 +47,6 @@ def main():
|
||||
print('--max_loaded_models must be >= 1; using 1')
|
||||
args.max_loaded_models = 1
|
||||
|
||||
print('* Initializing, be patient...')
|
||||
from ldm.generate import Generate
|
||||
|
||||
# these two lines prevent a horrible warning message from appearing
|
||||
@ -90,7 +90,12 @@ def main():
|
||||
safety_checker=opt.safety_checker,
|
||||
max_loaded_models=opt.max_loaded_models,
|
||||
)
|
||||
except (FileNotFoundError, IOError, KeyError) as e:
|
||||
except FileNotFoundError:
|
||||
print('** You appear to be missing configs/models.yaml')
|
||||
print('** You can either exit this script and run scripts/preload_models.py, or fix the problem now.')
|
||||
emergency_model_create(opt)
|
||||
sys.exit(-1)
|
||||
except (IOError, KeyError) as e:
|
||||
print(f'{e}. Aborting.')
|
||||
sys.exit(-1)
|
||||
|
||||
@ -208,7 +213,10 @@ def main_loop(gen, opt):
|
||||
setattr(opt,attr,path)
|
||||
|
||||
# retrieve previous value of seed if requested
|
||||
if opt.seed is not None and opt.seed < 0:
|
||||
# Exception: for postprocess operations negative seed values
|
||||
# mean "discard the original seed and generate a new one"
|
||||
# (this is a non-obvious hack and needs to be reworked)
|
||||
if opt.seed is not None and opt.seed < 0 and operation != 'postprocess':
|
||||
try:
|
||||
opt.seed = last_results[opt.seed][1]
|
||||
print(f'>> Reusing previous seed {opt.seed}')
|
||||
@ -277,7 +285,7 @@ def main_loop(gen, opt):
|
||||
filename = f'{prefix}.{use_prefix}.{seed}.png'
|
||||
tm = opt.text_mask[0]
|
||||
th = opt.text_mask[1] if len(opt.text_mask)>1 else 0.5
|
||||
formatted_dream_prompt = f'!mask {opt.prompt} -tm {tm} {th}'
|
||||
formatted_dream_prompt = f'!mask {opt.input_file_path} -tm {tm} {th}'
|
||||
path = file_writer.save_image_and_prompt_to_png(
|
||||
image = image,
|
||||
dream_prompt = formatted_dream_prompt,
|
||||
@ -317,7 +325,7 @@ def main_loop(gen, opt):
|
||||
tool = re.match('postprocess:(\w+)',opt.last_operation).groups()[0]
|
||||
add_postprocessing_to_metadata(
|
||||
opt,
|
||||
opt.prompt,
|
||||
opt.input_file_path,
|
||||
filename,
|
||||
tool,
|
||||
formatted_dream_prompt,
|
||||
@ -482,6 +490,7 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple:
|
||||
command = '-h'
|
||||
return command, operation
|
||||
|
||||
|
||||
def add_weights_to_config(model_path:str, gen, opt, completer):
|
||||
print(f'>> Model import in process. Please enter the values needed to configure this model:')
|
||||
print()
|
||||
@ -578,7 +587,7 @@ def write_config_file(conf_path, gen, model_name, new_config, clobber=False, mak
|
||||
|
||||
try:
|
||||
print('>> Verifying that new model loads...')
|
||||
yaml_str = gen.model_cache.add_model(model_name, new_config, clobber)
|
||||
gen.model_cache.add_model(model_name, new_config, clobber)
|
||||
assert gen.set_model(model_name) is not None, 'model failed to load'
|
||||
except AssertionError as e:
|
||||
print(f'** aborting **')
|
||||
@ -614,10 +623,16 @@ def do_textmask(gen, opt, callback):
|
||||
)
|
||||
|
||||
def do_postprocess (gen, opt, callback):
|
||||
file_path = opt.prompt # treat the prompt as the file pathname
|
||||
file_path = opt.prompt # treat the prompt as the file pathname
|
||||
if opt.new_prompt is not None:
|
||||
opt.prompt = opt.new_prompt
|
||||
else:
|
||||
opt.prompt = None
|
||||
|
||||
if os.path.dirname(file_path) == '': #basename given
|
||||
file_path = os.path.join(opt.outdir,file_path)
|
||||
|
||||
opt.input_file_path = file_path
|
||||
tool=None
|
||||
if opt.facetool_strength > 0:
|
||||
tool = opt.facetool
|
||||
@ -656,7 +671,10 @@ def do_postprocess (gen, opt, callback):
|
||||
def add_postprocessing_to_metadata(opt,original_file,new_file,tool,command):
|
||||
original_file = original_file if os.path.exists(original_file) else os.path.join(opt.outdir,original_file)
|
||||
new_file = new_file if os.path.exists(new_file) else os.path.join(opt.outdir,new_file)
|
||||
meta = retrieve_metadata(original_file)['sd-metadata']
|
||||
try:
|
||||
meta = retrieve_metadata(original_file)['sd-metadata']
|
||||
except AttributeError:
|
||||
meta = retrieve_metadata(new_file)['sd-metadata']
|
||||
if 'image' not in meta:
|
||||
meta = metadata_dumps(opt,seeds=[opt.seed])['image']
|
||||
meta['image'] = {}
|
||||
@ -704,7 +722,7 @@ def prepare_image_metadata(
|
||||
elif len(prior_variations) > 0:
|
||||
formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed)
|
||||
elif operation == 'postprocess':
|
||||
formatted_dream_prompt = '!fix '+opt.dream_prompt_str(seed=seed)
|
||||
formatted_dream_prompt = '!fix '+opt.dream_prompt_str(seed=seed,prompt=opt.input_file_path)
|
||||
else:
|
||||
formatted_dream_prompt = opt.dream_prompt_str(seed=seed)
|
||||
return filename,formatted_dream_prompt
|
||||
@ -878,6 +896,36 @@ def write_commands(opt, file_path:str, outfilepath:str):
|
||||
f.write('\n'.join(commands))
|
||||
print(f'>> File {outfilepath} with commands created')
|
||||
|
||||
def emergency_model_create(opt:Args):
|
||||
completer = get_completer(opt)
|
||||
completer.complete_extensions(('.yaml','.yml','.ckpt','.vae.pt'))
|
||||
completer.set_default_dir('.')
|
||||
valid_path = False
|
||||
while not valid_path:
|
||||
weights_file = input('Enter the path to a downloaded models file, or ^C to exit: ')
|
||||
valid_path = os.path.exists(weights_file)
|
||||
dir,basename = os.path.split(weights_file)
|
||||
|
||||
valid_name = False
|
||||
while not valid_name:
|
||||
name = input('Enter a short name for this model (no spaces): ')
|
||||
name = 'unnamed model' if len(name)==0 else name
|
||||
valid_name = ' ' not in name
|
||||
|
||||
description = input('Enter a description for this model: ')
|
||||
description = 'no description' if len(description)==0 else description
|
||||
|
||||
with open(opt.conf, 'w', encoding='utf-8') as f:
|
||||
f.write(f'{name}:\n')
|
||||
f.write(f' description: {description}\n')
|
||||
f.write(f' weights: {weights_file}\n')
|
||||
f.write(f' config: ./configs/stable-diffusion/v1-inference.yaml\n')
|
||||
f.write(f' width: 512\n')
|
||||
f.write(f' height: 512\n')
|
||||
f.write(f' default: true\n')
|
||||
print(f'Config file {opt.conf} is created. This script will now exit.')
|
||||
print(f'After restarting you may examine the entry with !models and edit it with !edit.')
|
||||
|
||||
######################################
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@ -104,13 +104,15 @@ def postscript():
|
||||
print(
|
||||
'''\n** Model Installation Successful **\nYou're all set! You may now launch InvokeAI using one of these two commands:
|
||||
Web version:
|
||||
|
||||
python scripts/invoke.py --web (connect to http://localhost:9090)
|
||||
|
||||
Command-line version:
|
||||
|
||||
python scripts/invoke.py
|
||||
|
||||
Remember to activate that 'invokeai' environment before running invoke.py.
|
||||
|
||||
Or, if you used one of the automated installers, execute "invoke.sh" (Linux/Mac)
|
||||
or "invoke.bat" (Windows) to start the script.
|
||||
|
||||
Have fun!
|
||||
'''
|
||||
)
|
||||
@ -415,7 +417,7 @@ def download_kornia():
|
||||
|
||||
#---------------------------------------------
|
||||
def download_clip():
|
||||
print('Loading CLIP model...',end='')
|
||||
print('Loading CLIP model (ignore deprecation errors)...',end='')
|
||||
sys.stdout.flush()
|
||||
version = 'openai/clip-vit-large-patch14'
|
||||
tokenizer = CLIPTokenizer.from_pretrained(version)
|
||||
@ -424,7 +426,7 @@ def download_clip():
|
||||
|
||||
#---------------------------------------------
|
||||
def download_gfpgan():
|
||||
print('Installing models from RealESRGAN and facexlib...',end='')
|
||||
print('Installing models from RealESRGAN and facexlib (ignore deprecation errors)...',end='')
|
||||
try:
|
||||
from realesrgan import RealESRGANer
|
||||
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
|
||||
@ -442,19 +444,19 @@ def download_gfpgan():
|
||||
print('Error loading ESRGAN:')
|
||||
print(traceback.format_exc())
|
||||
|
||||
print('Loading models from GFPGAN')
|
||||
print('Loading models from GFPGAN...',end='')
|
||||
for model in (
|
||||
[
|
||||
'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
|
||||
'src/gfpgan/experiments/pretrained_models/GFPGANv1.4.pth'
|
||||
'models/gfpgan/GFPGANv1.4.pth'
|
||||
],
|
||||
[
|
||||
'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth',
|
||||
'./gfpgan/weights/detection_Resnet50_Final.pth'
|
||||
'models/gfpgan/weights/detection_Resnet50_Final.pth'
|
||||
],
|
||||
[
|
||||
'https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth',
|
||||
'./gfpgan/weights/parsing_parsenet.pth'
|
||||
'models/gfpgan/weights/parsing_parsenet.pth'
|
||||
],
|
||||
):
|
||||
model_url,model_dest = model
|
||||
@ -489,22 +491,23 @@ def download_clipseg():
|
||||
import zipfile
|
||||
try:
|
||||
model_url = 'https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download'
|
||||
model_dest = 'src/clipseg/clipseg_weights.zip'
|
||||
weights_dir = 'src/clipseg/weights'
|
||||
if not os.path.exists(weights_dir):
|
||||
model_dest = 'models/clipseg/clipseg_weights'
|
||||
weights_zip = 'models/clipseg/weights.zip'
|
||||
|
||||
if not os.path.exists(model_dest):
|
||||
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
|
||||
if not os.path.exists('src/clipseg/weights/rd64-uni-refined.pth'):
|
||||
request.urlretrieve(model_url,model_dest)
|
||||
with zipfile.ZipFile(model_dest,'r') as zip:
|
||||
zip.extractall('src/clipseg')
|
||||
os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
|
||||
os.remove(model_dest)
|
||||
from clipseg_models.clipseg import CLIPDensePredT
|
||||
if not os.path.exists(f'{model_dest}/rd64-uni-refined.pth'):
|
||||
request.urlretrieve(model_url,weights_zip)
|
||||
with zipfile.ZipFile(weights_zip,'r') as zip:
|
||||
zip.extractall('models/clipseg')
|
||||
os.remove(weights_zip)
|
||||
|
||||
from clipseg.clipseg import CLIPDensePredT
|
||||
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
|
||||
model.eval()
|
||||
model.load_state_dict(
|
||||
torch.load(
|
||||
'src/clipseg/weights/rd64-uni-refined.pth',
|
||||
'models/clipseg/clipseg_weights/rd64-uni-refined.pth',
|
||||
map_location=torch.device('cpu')
|
||||
),
|
||||
strict=False,
|
||||
|
3
setup.py
@ -3,7 +3,7 @@ from setuptools import setup, find_packages
setup(
name='invoke-ai',
version='2.1.3',
description='',
description='InvokeAI text to image generation toolkit',
packages=find_packages(),
install_requires=[
'torch',
@ -11,3 +11,4 @@ setup(
'tqdm',
],
)
23
source_installer/create_installers.sh
Executable file
@ -0,0 +1,23 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
# make the installer zip for linux and mac
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.sh invokeAI
|
||||
cp readme.txt invokeAI
|
||||
|
||||
zip -r invokeAI-src-installer-linux.zip invokeAI
|
||||
zip -r invokeAI-src-installer-mac.zip invokeAI
|
||||
|
||||
# make the installer zip for windows
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.bat invokeAI
|
||||
cp readme.txt invokeAI
|
||||
cp WinLongPathsEnabled.reg invokeAI
|
||||
|
||||
zip -r invokeAI-src-installer-windows.zip invokeAI
|
||||
|
||||
echo "The installer zips are ready to be distributed.."
|
118
source_installer/install.bat
Normal file
@ -0,0 +1,118 @@
|
||||
@echo off
|
||||
|
||||
@rem This script will install git and conda (if not found on the PATH variable)
|
||||
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
@rem For users who already have git and conda, this step will be skipped.
|
||||
|
||||
@rem Next, it'll checkout the project's git repo, if necessary.
|
||||
@rem Finally, it'll create the conda environment and preload the models.
|
||||
|
||||
@rem This enables a user to install this project without manually installing conda and git.
|
||||
|
||||
echo "Installing InvokeAI.."
|
||||
echo.
|
||||
|
||||
@rem config
|
||||
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
|
||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
|
||||
set REPO_URL=https://github.com/invoke-ai/InvokeAI.git
|
||||
set umamba_exists=F
|
||||
@rem Change the download URL to an InvokeAI repo's release URL
|
||||
|
||||
@rem figure out whether git and conda needs to be installed
|
||||
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
set PACKAGES_TO_INSTALL=
|
||||
|
||||
call conda --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
|
||||
|
||||
call git --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T
|
||||
|
||||
@rem (if necessary) install git and conda into a contained environment
|
||||
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
||||
@rem download micromamba
|
||||
if "%umamba_exists%" == "F" (
|
||||
echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
|
||||
mkdir "%MAMBA_ROOT_PREFIX%"
|
||||
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
|
||||
@rem test the mamba binary
|
||||
echo Micromamba version:
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
|
||||
)
|
||||
|
||||
@rem create the installer env
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
|
||||
)
|
||||
|
||||
echo "Packages to install:%PACKAGES_TO_INSTALL%"
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
|
||||
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
)
|
||||
|
||||
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
@rem get the repo (and load into the current directory)
|
||||
if not exist ".git" (
|
||||
call git init
|
||||
call git config --local init.defaultBranch main
|
||||
call git remote add origin %REPO_URL%
|
||||
call git fetch
|
||||
call git checkout origin/main -ft
|
||||
)
|
||||
|
||||
@rem activate the base env
|
||||
call conda activate
|
||||
|
||||
@rem create the environment
|
||||
call conda env remove -n invokeai
|
||||
copy environments-and-requirements\environment-win-cuda.yml environment.yml
|
||||
call conda env create
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "Something went wrong while installing Python libraries and cannot continue.
|
||||
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods."
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
copy source_installer\invoke.bat invoke.bat
|
||||
copy source_installer\update.bat update.bat
|
||||
|
||||
call conda activate invokeai
|
||||
@rem preload the models
|
||||
call python scripts\preload_models.py
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "The preload_models.py script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. To run preload_models.py again,"
|
||||
echo "run the command 'update.bat' in this directory."
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
@rem tell the user their next steps
|
||||
echo ""
|
||||
echo "* InvokeAI installed successfully *"
|
||||
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit 0
|
||||
|
138
source_installer/install.sh
Executable file
@ -0,0 +1,138 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script will install git and conda (if not found on the PATH variable)
|
||||
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
# For users who already have git and conda, this step will be skipped.
|
||||
|
||||
# Next, it'll checkout the project's git repo, if necessary.
|
||||
# Finally, it'll create the conda environment and preload the models.
|
||||
|
||||
# This enables a user to install this project without manually installing conda and git.
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
echo "Installing InvokeAI.."
|
||||
echo ""
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) OS_NAME="linux";;
|
||||
Darwin*) OS_NAME="mac";;
|
||||
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
|
||||
esac
|
||||
|
||||
OS_ARCH=$(uname -m)
|
||||
case "${OS_ARCH}" in
|
||||
x86_64*) OS_ARCH="64";;
|
||||
arm64*) OS_ARCH="arm64";;
|
||||
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
|
||||
esac
|
||||
|
||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
||||
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi
|
||||
|
||||
# config
|
||||
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${OS_NAME}-${OS_ARCH}/latest"
|
||||
REPO_URL="https://github.com/invoke-ai/InvokeAI.git"
|
||||
umamba_exists="F"
|
||||
|
||||
# figure out whether git and conda needs to be installed
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
|
||||
PACKAGES_TO_INSTALL=""
|
||||
if ! $(which conda) -V &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
|
||||
if ! which git &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||
|
||||
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
||||
|
||||
# (if necessary) install git and conda into a contained environment
|
||||
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
||||
# download micromamba
|
||||
if [ "$umamba_exists" == "F" ]; then
|
||||
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
mkdir -p "$MAMBA_ROOT_PREFIX"
|
||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
# test the mamba binary
|
||||
echo "Micromamba version:"
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" --version
|
||||
fi
|
||||
|
||||
# create the installer env
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
|
||||
fi
|
||||
|
||||
echo "Packages to install:$PACKAGES_TO_INSTALL"
|
||||
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL
|
||||
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
echo "There was a problem while initializing micromamba. Cannot continue."
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
|
||||
# get the repo (and load into the current directory)
|
||||
if [ ! -e ".git" ]; then
|
||||
git init
|
||||
git config --local init.defaultBranch main
|
||||
git remote add origin "$REPO_URL"
|
||||
git fetch
|
||||
git checkout origin/main -ft
|
||||
fi
|
||||
|
||||
# create the environment
|
||||
CONDA_BASEPATH=$(conda info --base)
|
||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||
|
||||
conda activate
|
||||
if [ "$OS_NAME" == "mac" ]; then
|
||||
echo "Macintosh system detected. Installing MPS and CPU support."
|
||||
ln -sf environments-and-requirements/environment-mac.yml environment.yml
|
||||
else
|
||||
if (lsmod | grep amdgpu) &>/dev/null ; then
|
||||
echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
|
||||
ln -sf environments-and-requirements/environment-lin-amd.yml environment.yml
|
||||
else
|
||||
echo "Linux system detected. Installing CUDA and CPU support."
|
||||
ln -sf environments-and-requirements/environment-lin-cuda.yml environment.yml
|
||||
fi
|
||||
fi
|
||||
conda env update
|
||||
|
||||
status=$?
|
||||
|
||||
if test $status -ne 0
|
||||
then
|
||||
echo "Something went wrong while installing Python libraries and cannot continue."
|
||||
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods"
|
||||
else
|
||||
ln -sf ./source_installer/invoke.sh .
|
||||
ln -sf ./source_installer/update.sh .
|
||||
|
||||
conda activate invokeai
|
||||
# preload the models
|
||||
echo "Calling the preload_models.py script"
|
||||
python scripts/preload_models.py
|
||||
status=$?
|
||||
if test $status -ne 0
|
||||
then
|
||||
echo "The preload_models.py script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. Try again by running"
|
||||
echo "update.sh in this directory."
|
||||
else
|
||||
# tell the user their next steps
|
||||
echo "You can now start generating images by running invoke.sh (inside this folder), using ./invoke.sh"
|
||||
fi
|
||||
fi
|
||||
|
||||
conda activate invokeai
|
@ -23,6 +23,7 @@ if [ "$0" != "bash" ]; then
* ) echo "Invalid selection"; exit;;
esac
else # in developer console
which python
python --version
echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> "
fi
16
source_installer/readme.txt
Normal file
@ -0,0 +1,16 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
You may need to enable Windows Long Paths to install InvokeAI. If you're not
sure what this is, you almost certainly need to do this. Simply double-click the
"WinLongPathsEnabled.reg" file located in this directory, and approve the Windows
warnings. Note that you will need to have admin privileges in order to do this.

Then double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh' file (on Linux/Mac) to start InvokeAI.
@ -22,3 +22,5 @@ case "${OS_NAME}" in
esac

python scripts/preload_models.py