diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 11b21f6b80..ec4a105a9e 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -17,9 +17,9 @@ jobs: - aarch64 include: - arch: x86_64 - conda-env-file: environments-and-requirements/environment-lin-cuda.yml + conda-env-file: environment-lin-cuda.yml - arch: aarch64 - conda-env-file: environments-and-requirements/environment-lin-aarch64.yml + conda-env-file: environment-lin-aarch64.yml runs-on: ubuntu-latest name: ${{ matrix.arch }} steps: diff --git a/.github/workflows/test-invoke-conda.yml b/.github/workflows/test-invoke-conda.yml index a144303cc3..8fc3b10fef 100644 --- a/.github/workflows/test-invoke-conda.yml +++ b/.github/workflows/test-invoke-conda.yml @@ -23,7 +23,7 @@ jobs: - macOS-12 include: - os: ubuntu-latest - environment-file: environment.yml + environment-file: environment-lin-cuda.yml default-shell: bash -l {0} - os: macOS-12 environment-file: environment-mac.yml @@ -49,6 +49,9 @@ jobs: - name: create models.yaml from example run: cp configs/models.yaml.example configs/models.yaml + - name: create environment.yml + run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml + - name: Use cached conda packages id: use-cached-conda-packages uses: actions/cache@v3 @@ -61,7 +64,7 @@ jobs: uses: conda-incubator/setup-miniconda@v2 with: activate-environment: ${{ env.CONDA_ENV_NAME }} - environment-file: ${{ matrix.environment-file }} + environment-file: environment.yml miniconda-version: latest - name: set test prompt to main branch validation diff --git a/docker-build/Dockerfile b/docker-build/Dockerfile index 1de6f778bb..3f20e4b6f0 100644 --- a/docker-build/Dockerfile +++ b/docker-build/Dockerfile @@ -43,33 +43,42 @@ RUN apt-get update \ ARG invokeai_git=invoke-ai/InvokeAI ARG invokeai_branch=main ARG project_name=invokeai -RUN git clone -b ${invokeai_branch} 
https://github.com/${invokeai_git}.git /${project_name} \ - && cp /${project_name}/configs/models.yaml.example /${project_name}/configs/models.yaml \ - && ln -s /data/models/v1-5-pruned-emaonly.ckpt /${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt \ - && ln -s /data/outputs/ /${project_name}/outputs +ARG conda_env_file=environment-lin-cuda.yml +RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \ + && cp \ + "/${project_name}/configs/models.yaml.example" \ + "/${project_name}/configs/models.yaml" \ + && ln -sf \ + "/${project_name}/environments-and-requirements/${conda_env_file}" \ + "/${project_name}/environment.yml" \ + && ln -sf \ + /data/models/v1-5-pruned-emaonly.ckpt \ + "/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \ + && ln -sf \ + /data/outputs/ \ + "/${project_name}/outputs" # set workdir -WORKDIR /${project_name} +WORKDIR "/${project_name}" # install conda env and preload models ARG conda_prefix=/opt/conda -ARG conda_env_file=environment.yml -COPY --from=get_miniconda ${conda_prefix} ${conda_prefix} -RUN source ${conda_prefix}/etc/profile.d/conda.sh \ +COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}" +RUN source "${conda_prefix}/etc/profile.d/conda.sh" \ && conda init bash \ && source ~/.bashrc \ && conda env create \ - --name ${project_name} \ - --file ${conda_env_file} \ + --name "${project_name}" \ && rm -Rf ~/.cache \ && conda clean -afy \ - && echo "conda activate ${project_name}" >> ~/.bashrc \ - && conda activate ${project_name} \ + && echo "conda activate ${project_name}" >> ~/.bashrc + +RUN source ~/.bashrc \ && python scripts/preload_models.py \ --no-interactive # Copy entrypoint and set env -ENV CONDA_PREFIX=${conda_prefix} -ENV PROJECT_NAME=${project_name} +ENV CONDA_PREFIX="${conda_prefix}" +ENV PROJECT_NAME="${project_name}" COPY docker-build/entrypoint.sh / ENTRYPOINT [ "/entrypoint.sh" ] diff --git a/docker-build/build.sh 
b/docker-build/build.sh index ebd1ca6a41..c7b94d7c0e 100755 --- a/docker-build/build.sh +++ b/docker-build/build.sh @@ -8,7 +8,7 @@ source ./docker-build/env.sh || echo "please run from repository root" || exit 1 invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}} invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda} -invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml} +invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml} invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI} invokeai_branch=${INVOKEAI_BRANCH:-main} huggingface_token=${HUGGINGFACE_TOKEN?} diff --git a/docs/features/WEBUIHOTKEYS.md b/docs/features/WEBUIHOTKEYS.md index 89432687dc..1f0af08dd6 100644 --- a/docs/features/WEBUIHOTKEYS.md +++ b/docs/features/WEBUIHOTKEYS.md @@ -2,7 +2,7 @@ title: WebUI Hotkey List --- -# **WebUI Hotkey List** +# :material-keyboard: **WebUI Hotkey List** ## General @@ -19,7 +19,7 @@ title: WebUI Hotkey List | ++ctrl+enter++ | Start processing | | ++shift+x++ | cancel Processing | | ++shift+d++ | Toggle Dark Mode | -| ` | Toggle console | +| ++"`"++ | Toggle console | ## Tabs @@ -48,10 +48,10 @@ title: WebUI Hotkey List | Setting | Hotkey | | ---------------------------- | --------------------- | -| [ | Decrease brush size | -| ] | Increase brush size | -| alt + [ | Decrease mask opacity | -| alt + ] | Increase mask opacity | +| ++"["++ | Decrease brush size | +| ++"]"++ | Increase brush size | +| ++alt+"["++ | Decrease mask opacity | +| ++alt+"]"++ | Increase mask opacity | | ++b++ | Select brush | | ++e++ | Select eraser | | ++ctrl+z++ | Undo brush stroke | diff --git a/docs/installation/INSTALL.md b/docs/installation/INSTALL.md index 994b575c5d..9cb65760bb 100644 --- a/docs/installation/INSTALL.md +++ b/docs/installation/INSTALL.md @@ -9,50 +9,49 @@ experience and preferences. 1. 
[1-click installer](INSTALL_1CLICK.md) - This is an automated shell script that will handle installation of - all dependencies for you, and is recommended for those who have - limited or no experience with the Python programming language, are - not currently interested in contributing to the project, and just want - the thing to install and run. In this version, you interact with the - web server and command-line clients through a shell script named - `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows), and perform - updates using `update.sh` and `update.bat`. + This is an automated shell script that will handle installation of + all dependencies for you, and is recommended for those who have + limited or no experience with the Python programming language, are + not currently interested in contributing to the project, and just want + the thing to install and run. In this version, you interact with the + web server and command-line clients through a shell script named + `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows), and perform + updates using `update.sh` and `update.bat`. 2. [Pre-compiled PIP installer](INSTALL_PCP.md) - This is a series of installer files for which all the requirements - for InvokeAI have been precompiled, thereby preventing the conflicts - that sometimes occur when an external library is changed unexpectedly. - It will leave you with an environment in which you interact directly - with the scripts for running the web and command line clients, and - you will update to new versions using standard developer commands. + This is a series of installer files for which all the requirements + for InvokeAI have been precompiled, thereby preventing the conflicts + that sometimes occur when an external library is changed unexpectedly. + It will leave you with an environment in which you interact directly + with the scripts for running the web and command line clients, and + you will update to new versions using standard developer commands. 
- This method is recommended for users with a bit of experience using - the `git` and `pip` tools. + This method is recommended for users with a bit of experience using + the `git` and `pip` tools. 3. [Manual Installation](INSTALL_MANUAL.md) - In this method you will manually run the commands needed to install - InvokeAI and its dependencies. We offer two recipes: one suited to - those who prefer the `conda` tool, and one suited to those who prefer - `pip` and Python virtual environments. + In this method you will manually run the commands needed to install + InvokeAI and its dependencies. We offer two recipes: one suited to + those who prefer the `conda` tool, and one suited to those who prefer + `pip` and Python virtual environments. + + This method is recommended for users who have previously used `conda` + or `pip` in the past, developers, and anyone who wishes to remain on + the cutting edge of future InvokeAI development and is willing to put + up with occasional glitches and breakage. - This method is recommended for users who have previously used `conda` - or `pip` in the past, developers, and anyone who wishes to remain on - the cutting edge of future InvokeAI development and is willing to put - up with occasional glitches and breakage. - 4. [Docker Installation](INSTALL_DOCKER.md) - We also offer a method for creating Docker containers containing - InvokeAI and its dependencies. This method is recommended for - individuals with experience with Docker containers and understand - the pluses and minuses of a container-based install. + We also offer a method for creating Docker containers containing + InvokeAI and its dependencies. This method is recommended for + individuals with experience with Docker containers and understand + the pluses and minuses of a container-based install. 5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md) - This method is suitable for running InvokeAI on a Google Colab - account. 
It is recommended for individuals who have previously - worked on the Colab and are comfortable with the Jupyter notebook - environment. - + This method is suitable for running InvokeAI on a Google Colab + account. It is recommended for individuals who have previously + worked on the Colab and are comfortable with the Jupyter notebook + environment. diff --git a/docs/installation/INSTALL_DOCKER.md b/docs/installation/INSTALL_DOCKER.md index 08d4c5a4d3..326ad39021 100644 --- a/docs/installation/INSTALL_DOCKER.md +++ b/docs/installation/INSTALL_DOCKER.md @@ -1,29 +1,28 @@ --- -Title: Docker +title: Docker --- # :fontawesome-brands-docker: Docker ## Before you begin -- For end users: Install InvokeAI locally using the instructions for - your OS. +- For end users: Install InvokeAI locally using the instructions for your OS. - For developers: For container-related development tasks or for enabling easy deployment to other environments (on-premises or cloud), follow these instructions. For general use, install locally to leverage your machine's GPU. ## Why containers? -They provide a flexible, reliable way to build and deploy InvokeAI. -You'll also use a Docker volume to store the largest model files and image -outputs as a first step in decoupling storage and compute. Future enhancements -can do this for other assets. See [Processes](https://12factor.net/processes) -under the Twelve-Factor App methodology for details on why running applications -in such a stateless fashion is important. +They provide a flexible, reliable way to build and deploy InvokeAI. You'll also +use a Docker volume to store the largest model files and image outputs as a +first step in decoupling storage and compute. Future enhancements can do this +for other assets. See [Processes](https://12factor.net/processes) under the +Twelve-Factor App methodology for details on why running applications in such a +stateless fashion is important. 
You can specify the target platform when building the image and running the -container. You'll also need to specify the InvokeAI requirements file -that matches the container's OS and the architecture it will run on. +container. You'll also need to specify the InvokeAI requirements file that +matches the container's OS and the architecture it will run on. Developers on Apple silicon (M1/M2): You [can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224) @@ -65,13 +64,14 @@ created in the last step. Some Suggestions of variables you may want to change besides the Token: -| Environment-Variable | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------------ | -| `HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"` | This is the only required variable, without you can't get the checkpoint | -| `ARCH=aarch64` | if you are using a ARM based CPU | -| `INVOKEAI_TAG=yourname/invokeai:latest` | the Container Repository / Tag which will be used | -| `INVOKEAI_CONDA_ENV_FILE=environment-linux-aarch64.yml` | since environment.yml wouldn't work with aarch | -| `INVOKEAI_GIT="-b branchname https://github.com/username/reponame"` | if you want to use your own fork | +| Environment-Variable | Default value | Description | +| ------------------------- | ----------------------------- | ---------------------------------------------------------------------------- | +| `HUGGINGFACE_TOKEN` | No default, but **required**! 
| This is the only **required** variable, without it you can't get the checkpoint | | `ARCH` | x86_64 | if you are using an ARM based CPU | | `INVOKEAI_TAG` | invokeai-x86_64 | the Container Repository / Tag which will be used | | `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml | since environment.yml wouldn't work with aarch | | `INVOKEAI_GIT` | invoke-ai/InvokeAI | the repository to use | | `INVOKEAI_BRANCH` | main | the branch to checkout | #### Build the Image @@ -79,25 +79,41 @@ I provided a build script, which is located in `docker-build/build.sh` but still needs to be executed from the Repository root. ```bash -docker-build/build.sh +./docker-build/build.sh ``` The build Script not only builds the container, but also creates the docker -volume if not existing yet, or if empty it will just download the models. When -it is done you can run the container via the run script +volume if not existing yet, or if empty it will just download the models. + +#### Run the Container + +After the build process is done, you can run the container via the provided +`docker-build/run.sh` script ```bash -docker-build/run.sh +./docker-build/run.sh ``` When used without arguments, the container will start the website and provide you the link to open it. But if you want to use some other parameters you can also do so. +!!! example + + ```bash + docker-build/run.sh --from_file tests/validate_pr_prompt.txt + ``` + + The output folder is located on the volume which is also used to store the model. + + Find out more about available CLI parameters at [features/CLI.md](../features/CLI.md) + +--- + !!! warning "Deprecated" - From here on it is the rest of the previous Docker-Docs, which will still - provide usefull informations for one or the other. + From here on you will find the rest of the previous Docker-Docs, which will still + provide some useful information.
## Usage (time to have fun) diff --git a/environments-and-requirements/environment-mac.yml b/environments-and-requirements/environment-mac.yml index 21d133c572..99182476dd 100644 --- a/environments-and-requirements/environment-mac.yml +++ b/environments-and-requirements/environment-mac.yml @@ -29,7 +29,6 @@ dependencies: - pytorch-lightning=1.7.7 - scipy=1.9.3 - streamlit=1.12.2 - - taming-transformers-rom1504 - sympy=1.10.1 - tensorboard=2.10.0 - torchmetrics=0.10.1 @@ -46,6 +45,7 @@ dependencies: - getpass_asterisk - dependency_injector==4.40.0 - realesrgan==0.2.5.0 + - taming-transformers-rom1504 - test-tube==0.7.5 - git+https://github.com/openai/CLIP.git@main#egg=clip - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion