Merge branch 'release-candidate-2-1-3' of github.com:/invoke-ai/InvokeAI into release-candidate-2-1-3

This commit is contained in:
Lincoln Stein 2022-11-10 11:21:56 +00:00
commit e481bfac61
8 changed files with 111 additions and 84 deletions

View File

@ -17,9 +17,9 @@ jobs:
- aarch64 - aarch64
include: include:
- arch: x86_64 - arch: x86_64
conda-env-file: environments-and-requirements/environment-lin-cuda.yml conda-env-file: environment-lin-cuda.yml
- arch: aarch64 - arch: aarch64
conda-env-file: environments-and-requirements/environment-lin-aarch64.yml conda-env-file: environment-lin-aarch64.yml
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: ${{ matrix.arch }} name: ${{ matrix.arch }}
steps: steps:

View File

@ -23,7 +23,7 @@ jobs:
- macOS-12 - macOS-12
include: include:
- os: ubuntu-latest - os: ubuntu-latest
environment-file: environment.yml environment-file: environment-lin-cuda.yml
default-shell: bash -l {0} default-shell: bash -l {0}
- os: macOS-12 - os: macOS-12
environment-file: environment-mac.yml environment-file: environment-mac.yml
@ -49,6 +49,9 @@ jobs:
- name: create models.yaml from example - name: create models.yaml from example
run: cp configs/models.yaml.example configs/models.yaml run: cp configs/models.yaml.example configs/models.yaml
- name: create environment.yml
run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml
- name: Use cached conda packages - name: Use cached conda packages
id: use-cached-conda-packages id: use-cached-conda-packages
uses: actions/cache@v3 uses: actions/cache@v3
@ -61,7 +64,7 @@ jobs:
uses: conda-incubator/setup-miniconda@v2 uses: conda-incubator/setup-miniconda@v2
with: with:
activate-environment: ${{ env.CONDA_ENV_NAME }} activate-environment: ${{ env.CONDA_ENV_NAME }}
environment-file: ${{ matrix.environment-file }} environment-file: environment.yml
miniconda-version: latest miniconda-version: latest
- name: set test prompt to main branch validation - name: set test prompt to main branch validation

View File

@ -43,33 +43,42 @@ RUN apt-get update \
ARG invokeai_git=invoke-ai/InvokeAI ARG invokeai_git=invoke-ai/InvokeAI
ARG invokeai_branch=main ARG invokeai_branch=main
ARG project_name=invokeai ARG project_name=invokeai
RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git /${project_name} \ ARG conda_env_file=environment-lin-cuda.yml
&& cp /${project_name}/configs/models.yaml.example /${project_name}/configs/models.yaml \ RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
&& ln -s /data/models/v1-5-pruned-emaonly.ckpt /${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt \ && cp \
&& ln -s /data/outputs/ /${project_name}/outputs "/${project_name}/configs/models.yaml.example" \
"/${project_name}/configs/models.yaml" \
&& ln -sf \
"/${project_name}/environments-and-requirements/${conda_env_file}" \
"/${project_name}/environment.yml" \
&& ln -sf \
/data/models/v1-5-pruned-emaonly.ckpt \
"/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
&& ln -sf \
/data/outputs/ \
"/${project_name}/outputs"
# set workdir # set workdir
WORKDIR /${project_name} WORKDIR "/${project_name}"
# install conda env and preload models # install conda env and preload models
ARG conda_prefix=/opt/conda ARG conda_prefix=/opt/conda
ARG conda_env_file=environment.yml COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
COPY --from=get_miniconda ${conda_prefix} ${conda_prefix} RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
RUN source ${conda_prefix}/etc/profile.d/conda.sh \
&& conda init bash \ && conda init bash \
&& source ~/.bashrc \ && source ~/.bashrc \
&& conda env create \ && conda env create \
--name ${project_name} \ --name "${project_name}" \
--file ${conda_env_file} \
&& rm -Rf ~/.cache \ && rm -Rf ~/.cache \
&& conda clean -afy \ && conda clean -afy \
&& echo "conda activate ${project_name}" >> ~/.bashrc \ && echo "conda activate ${project_name}" >> ~/.bashrc
&& conda activate ${project_name} \
RUN source ~/.bashrc \
&& python scripts/preload_models.py \ && python scripts/preload_models.py \
--no-interactive --no-interactive
# Copy entrypoint and set env # Copy entrypoint and set env
ENV CONDA_PREFIX=${conda_prefix} ENV CONDA_PREFIX="${conda_prefix}"
ENV PROJECT_NAME=${project_name} ENV PROJECT_NAME="${project_name}"
COPY docker-build/entrypoint.sh / COPY docker-build/entrypoint.sh /
ENTRYPOINT [ "/entrypoint.sh" ] ENTRYPOINT [ "/entrypoint.sh" ]

View File

@ -8,7 +8,7 @@ source ./docker-build/env.sh || echo "please run from repository root" || exit 1
invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}} invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda} invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml} invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI} invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
invokeai_branch=${INVOKEAI_BRANCH:-main} invokeai_branch=${INVOKEAI_BRANCH:-main}
huggingface_token=${HUGGINGFACE_TOKEN?} huggingface_token=${HUGGINGFACE_TOKEN?}

View File

@ -2,7 +2,7 @@
title: WebUI Hotkey List title: WebUI Hotkey List
--- ---
# **WebUI Hotkey List** # :material-keyboard: **WebUI Hotkey List**
## General ## General
@ -19,7 +19,7 @@ title: WebUI Hotkey List
| ++ctrl+enter++ | Start processing | | ++ctrl+enter++ | Start processing |
| ++shift+x++ | cancel Processing | | ++shift+x++ | cancel Processing |
| ++shift+d++ | Toggle Dark Mode | | ++shift+d++ | Toggle Dark Mode |
| ` | Toggle console | | ++"`"++ | Toggle console |
## Tabs ## Tabs
@ -48,10 +48,10 @@ title: WebUI Hotkey List
| Setting | Hotkey | | Setting | Hotkey |
| ---------------------------- | --------------------- | | ---------------------------- | --------------------- |
| [ | Decrease brush size | | ++"["++ | Decrease brush size |
| ] | Increase brush size | | ++"]"++ | Increase brush size |
| alt + [ | Decrease mask opacity | | ++alt+"["++ | Decrease mask opacity |
| alt + ] | Increase mask opacity | | ++alt+"]"++ | Increase mask opacity |
| ++b++ | Select brush | | ++b++ | Select brush |
| ++e++ | Select eraser | | ++e++ | Select eraser |
| ++ctrl+z++ | Undo brush stroke | | ++ctrl+z++ | Undo brush stroke |

View File

@ -9,50 +9,49 @@ experience and preferences.
1. [1-click installer](INSTALL_1CLICK.md) 1. [1-click installer](INSTALL_1CLICK.md)
This is an automated shell script that will handle installation of This is an automated shell script that will handle installation of
all dependencies for you, and is recommended for those who have all dependencies for you, and is recommended for those who have
limited or no experience with the Python programming language, are limited or no experience with the Python programming language, are
not currently interested in contributing to the project, and just want not currently interested in contributing to the project, and just want
the thing to install and run. In this version, you interact with the the thing to install and run. In this version, you interact with the
web server and command-line clients through a shell script named web server and command-line clients through a shell script named
`invoke.sh` (Linux/Mac) or `invoke.bat` (Windows), and perform `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows), and perform
updates using `update.sh` and `update.bat`. updates using `update.sh` and `update.bat`.
2. [Pre-compiled PIP installer](INSTALL_PCP.md) 2. [Pre-compiled PIP installer](INSTALL_PCP.md)
This is a series of installer files for which all the requirements This is a series of installer files for which all the requirements
for InvokeAI have been precompiled, thereby preventing the conflicts for InvokeAI have been precompiled, thereby preventing the conflicts
that sometimes occur when an external library is changed unexpectedly. that sometimes occur when an external library is changed unexpectedly.
It will leave you with an environment in which you interact directly It will leave you with an environment in which you interact directly
with the scripts for running the web and command line clients, and with the scripts for running the web and command line clients, and
you will update to new versions using standard developer commands. you will update to new versions using standard developer commands.
This method is recommended for users with a bit of experience using This method is recommended for users with a bit of experience using
the `git` and `pip` tools. the `git` and `pip` tools.
3. [Manual Installation](INSTALL_MANUAL.md) 3. [Manual Installation](INSTALL_MANUAL.md)
In this method you will manually run the commands needed to install In this method you will manually run the commands needed to install
InvokeAI and its dependencies. We offer two recipes: one suited to InvokeAI and its dependencies. We offer two recipes: one suited to
those who prefer the `conda` tool, and one suited to those who prefer those who prefer the `conda` tool, and one suited to those who prefer
`pip` and Python virtual environments. `pip` and Python virtual environments.
This method is recommended for users who have previously used `conda`
or `pip` in the past, developers, and anyone who wishes to remain on
the cutting edge of future InvokeAI development and is willing to put
up with occasional glitches and breakage.
This method is recommended for users who have previously used `conda`
or `pip` in the past, developers, and anyone who wishes to remain on
the cutting edge of future InvokeAI development and is willing to put
up with occasional glitches and breakage.
4. [Docker Installation](INSTALL_DOCKER.md) 4. [Docker Installation](INSTALL_DOCKER.md)
We also offer a method for creating Docker containers containing We also offer a method for creating Docker containers containing
InvokeAI and its dependencies. This method is recommended for InvokeAI and its dependencies. This method is recommended for
individuals with experience with Docker containers and understand individuals with experience with Docker containers and understand
the pluses and minuses of a container-based install. the pluses and minuses of a container-based install.
5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md) 5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)
This method is suitable for running InvokeAI on a Google Colab This method is suitable for running InvokeAI on a Google Colab
account. It is recommended for individuals who have previously account. It is recommended for individuals who have previously
worked on the Colab and are comfortable with the Jupyter notebook worked on the Colab and are comfortable with the Jupyter notebook
environment. environment.

View File

@ -1,29 +1,28 @@
--- ---
Title: Docker title: Docker
--- ---
# :fontawesome-brands-docker: Docker # :fontawesome-brands-docker: Docker
## Before you begin ## Before you begin
- For end users: Install InvokeAI locally using the instructions for - For end users: Install InvokeAI locally using the instructions for your OS.
your OS.
- For developers: For container-related development tasks or for enabling easy - For developers: For container-related development tasks or for enabling easy
deployment to other environments (on-premises or cloud), follow these deployment to other environments (on-premises or cloud), follow these
instructions. For general use, install locally to leverage your machine's GPU. instructions. For general use, install locally to leverage your machine's GPU.
## Why containers? ## Why containers?
They provide a flexible, reliable way to build and deploy InvokeAI. They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
You'll also use a Docker volume to store the largest model files and image use a Docker volume to store the largest model files and image outputs as a
outputs as a first step in decoupling storage and compute. Future enhancements first step in decoupling storage and compute. Future enhancements can do this
can do this for other assets. See [Processes](https://12factor.net/processes) for other assets. See [Processes](https://12factor.net/processes) under the
under the Twelve-Factor App methodology for details on why running applications Twelve-Factor App methodology for details on why running applications in such a
in such a stateless fashion is important. stateless fashion is important.
You can specify the target platform when building the image and running the You can specify the target platform when building the image and running the
container. You'll also need to specify the InvokeAI requirements file container. You'll also need to specify the InvokeAI requirements file that
that matches the container's OS and the architecture it will run on. matches the container's OS and the architecture it will run on.
Developers on Apple silicon (M1/M2): You Developers on Apple silicon (M1/M2): You
[can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224) [can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224)
@ -65,13 +64,14 @@ created in the last step.
Some Suggestions of variables you may want to change besides the Token: Some Suggestions of variables you may want to change besides the Token:
| Environment-Variable | Description | | Environment-Variable | Default value | Description |
| ------------------------------------------------------------------- | ------------------------------------------------------------------------ | | ------------------------- | ----------------------------- | ---------------------------------------------------------------------------- |
| `HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"` | This is the only required variable, without it you can't get the checkpoint | | `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't get the checkpoint |
| `ARCH=aarch64` | if you are using a ARM based CPU | | `ARCH` | x86_64 | if you are using a ARM based CPU |
| `INVOKEAI_TAG=yourname/invokeai:latest` | the Container Repository / Tag which will be used | | `INVOKEAI_TAG` | invokeai-x86_64 | the Container Repository / Tag which will be used |
| `INVOKEAI_CONDA_ENV_FILE=environment-linux-aarch64.yml` | since environment.yml wouldn't work with aarch | | `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml | since environment.yml wouldn't work with aarch |
| `INVOKEAI_GIT="-b branchname https://github.com/username/reponame"` | if you want to use your own fork | | `INVOKEAI_GIT` | invoke-ai/InvokeAI | the repository to use |
| `INVOKEAI_BRANCH` | main | the branch to checkout |
#### Build the Image #### Build the Image
@ -79,25 +79,41 @@ I provided a build script, which is located in `docker-build/build.sh` but still
needs to be executed from the Repository root. needs to be executed from the Repository root.
```bash ```bash
docker-build/build.sh ./docker-build/build.sh
``` ```
The build Script not only builds the container, but also creates the docker The build Script not only builds the container, but also creates the docker
volume if not existing yet, or if empty it will just download the models. When volume if not existing yet, or if empty it will just download the models.
it is done you can run the container via the run script
#### Run the Container
After the build process is done, you can run the container via the provided
`docker-build/run.sh` script
```bash ```bash
docker-build/run.sh ./docker-build/run.sh
``` ```
When used without arguments, the container will start the website and provide When used without arguments, the container will start the website and provide
you the link to open it. But if you want to use some other parameters you can you the link to open it. But if you want to use some other parameters you can
also do so. also do so.
!!! example
```bash
docker-build/run.sh --from_file tests/validate_pr_prompt.txt
```
The output folder is located on the volume which is also used to store the model.
Find out more about available CLI-Parameter at [features/CLI.md](../features/CLI.md)
---
!!! warning "Deprecated" !!! warning "Deprecated"
From here on it is the rest of the previous Docker-Docs, which will still From here on you will find the rest of the previous Docker-Docs, which will still
provide useful information for one or the other. provide some useful information.
## Usage (time to have fun) ## Usage (time to have fun)

View File

@ -29,7 +29,6 @@ dependencies:
- pytorch-lightning=1.7.7 - pytorch-lightning=1.7.7
- scipy=1.9.3 - scipy=1.9.3
- streamlit=1.12.2 - streamlit=1.12.2
- taming-transformers-rom1504
- sympy=1.10.1 - sympy=1.10.1
- tensorboard=2.10.0 - tensorboard=2.10.0
- torchmetrics=0.10.1 - torchmetrics=0.10.1
@ -46,6 +45,7 @@ dependencies:
- getpass_asterisk - getpass_asterisk
- dependency_injector==4.40.0 - dependency_injector==4.40.0
- realesrgan==0.2.5.0 - realesrgan==0.2.5.0
- taming-transformers-rom1504
- test-tube==0.7.5 - test-tube==0.7.5
- git+https://github.com/openai/CLIP.git@main#egg=clip - git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion