diff --git a/.dockerignore b/.dockerignore
index 255335040f..5df924ddee 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,16 +1,13 @@
*
!backend
+!environments-and-requirements
!frontend
-!binary_installer
!ldm
!main.py
!scripts
!server
!static
!setup.py
-!docker-build
-!docs
-docker-build/Dockerfile
# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
@@ -19,8 +16,4 @@ docker-build/Dockerfile
!configs
configs/models.yaml
-# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists
-!environment*
-environment.yml
-
**/__pycache__
diff --git a/.github/workflows/build-cloud-img.yml b/.github/workflows/build-cloud-img.yml
index 9ef41a26c3..f27cbea80a 100644
--- a/.github/workflows/build-cloud-img.yml
+++ b/.github/workflows/build-cloud-img.yml
@@ -1,15 +1,15 @@
name: Build and push cloud image
on:
workflow_dispatch:
- push:
- branches:
- - main
- tags:
- - v*
- # we will NOT push the image on pull requests, only test buildability.
- pull_request:
- branches:
- - main
+ # push:
+ # branches:
+ # - main
+ # tags:
+ # - v*
+ # # we will NOT push the image on pull requests, only test buildability.
+ # pull_request:
+ # branches:
+ # - main
permissions:
contents: read
@@ -82,6 +82,6 @@ jobs:
file: docker-build/Dockerfile.cloud
platforms: Linux/${{ matrix.arch }}
# do not push the image on PRs
- push: ${{ github.event_name != 'pull_request' }}
+ push: false
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
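
With the `push` and `pull_request` triggers commented out above, the cloud image is now built only when the workflow is dispatched by hand. As a sketch (assuming the GitHub CLI is installed and authenticated with access to the repository's Actions), a manual dispatch from a local clone might look like this; the same dispatch is also available from the Actions tab in the GitHub web UI:

```bash
# Trigger the "Build and push cloud image" workflow manually
gh workflow run build-cloud-img.yml --ref main
```
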
diff --git a/.github/workflows/lint-frontend.yml b/.github/workflows/lint-frontend.yml
new file mode 100644
index 0000000000..3b4496d800
--- /dev/null
+++ b/.github/workflows/lint-frontend.yml
@@ -0,0 +1,28 @@
+name: Lint frontend
+
+on:
+ pull_request:
+ paths:
+ - 'frontend/**'
+ push:
+ paths:
+ - 'frontend/**'
+
+defaults:
+ run:
+ working-directory: frontend
+
+jobs:
+ lint-frontend:
+ runs-on: ubuntu-22.04
+ steps:
+ - name: Setup Node 18
+ uses: actions/setup-node@v3
+ with:
+ node-version: '18'
+ - uses: actions/checkout@v3
+ - run: 'yarn install --frozen-lockfile'
+ - run: 'yarn tsc'
+ - run: 'yarn run madge'
+ - run: 'yarn run lint --max-warnings=0'
+ - run: 'yarn run prettier --check'
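
The job above only lints files under `frontend/`. To catch failures before pushing, the same steps can be reproduced locally; this is a sketch assuming Node 18 and yarn are installed and that the `tsc`, `madge`, `lint`, and `prettier` scripts are configured in `package.json` as the workflow expects:

```bash
# Mirror the lint-frontend CI job locally
cd frontend
yarn install --frozen-lockfile   # install pinned dependencies
yarn tsc                         # TypeScript type-check
yarn run madge                   # dependency-graph check (e.g. circular imports)
yarn run lint --max-warnings=0   # eslint; any warning fails the run
yarn run prettier --check        # verify formatting without rewriting files
```
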
diff --git a/.github/workflows/pyflakes.yml b/.github/workflows/pyflakes.yml
new file mode 100644
index 0000000000..fcf8103d46
--- /dev/null
+++ b/.github/workflows/pyflakes.yml
@@ -0,0 +1,19 @@
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+ - development
+ - 'release-candidate-*'
+
+jobs:
+ pyflakes:
+ name: runner / pyflakes
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: pyflakes
+ uses: reviewdog/action-pyflakes@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-review
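
Reviewdog posts the pyflakes findings as pull-request review comments. To see roughly the same findings locally before opening a PR (a sketch, assuming a working Python environment):

```bash
# Install and run pyflakes over the working tree
pip install pyflakes
pyflakes .
```
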
diff --git a/README.md b/README.md
index f0522755ca..c07ade79b1 100644
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
-1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.2.3)
+1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
2. Download the .zip file for your OS (Windows/macOS/Linux).
3. Unzip the file.
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
@@ -167,10 +167,7 @@ To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the
If you are unfamiliar with how
to contribute to GitHub projects, here is a
-[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress, but for now the most
-important thing is to **make your pull request against the "development" branch**, and not against
-"main". This will help keep public breakage to a minimum and will allow you to propose more radical
-changes.
+[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, is in progress. You can **make your pull request against the "main" branch**.
We hope you enjoy using our software as much as we enjoy creating it,
and we hope that some of those of you who are reading this will elect
diff --git a/docker-build/Dockerfile b/docker-build/Dockerfile
index d85d65dd57..353a02b50c 100644
--- a/docker-build/Dockerfile
+++ b/docker-build/Dockerfile
@@ -14,9 +14,10 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
-# set workdir, PATH and copy sources
-WORKDIR /usr/src/app
-ENV PATH /usr/src/app/.venv/bin:$PATH
+# set WORKDIR, PATH and copy sources
+ARG WORKDIR=/usr/src/app
+WORKDIR ${WORKDIR}
+ENV PATH ${WORKDIR}/.venv/bin:$PATH
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./
@@ -38,18 +39,28 @@ FROM python:3.10-slim AS runtime
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
+ build-essential=12.9 \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
+ libopencv-dev=4.5.* \
+ python3-opencv=4.5.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
-WORKDIR /usr/src/app
-COPY --from=builder /usr/src/app .
-
-# set Environment, Entrypoint and default CMD
+# setup environment
+ARG WORKDIR=/usr/src/app
+WORKDIR ${WORKDIR}
+COPY --from=builder ${WORKDIR} .
+ENV PATH=${WORKDIR}/.venv/bin:$PATH
ENV INVOKEAI_ROOT /data
ENV INVOKE_MODEL_RECONFIGURE --yes
-ENV PATH=/usr/src/app/.venv/bin:$PATH
+# Initialize patchmatch
+RUN ln -sf \
+ /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
+ /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
+ && python3 -c "from patchmatch import patch_match"
+
+# set Entrypoint and default CMD
ENTRYPOINT [ "python3", "scripts/invoke.py" ]
CMD [ "--web", "--host=0.0.0.0" ]
diff --git a/docker-build/build.sh b/docker-build/build.sh
index 6f0fbc174f..14e010d9c3 100755
--- a/docker-build/build.sh
+++ b/docker-build/build.sh
@@ -1,49 +1,35 @@
#!/usr/bin/env bash
set -e
-# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
-# configure values by using env when executing build.sh f.e. `env ARCH=aarch64 ./build.sh`
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
source ./docker-build/env.sh \
|| echo "please execute docker-build/build.sh from repository root" \
|| exit 1
-pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
-dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
+PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
+DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
# print the settings
echo -e "You are using these values:\n"
-echo -e "Dockerfile:\t ${dockerfile}"
-echo -e "requirements:\t ${pip_requirements}"
-echo -e "volumename:\t ${volumename}"
-echo -e "arch:\t\t ${arch}"
-echo -e "platform:\t ${platform}"
-echo -e "invokeai_tag:\t ${invokeai_tag}\n"
+echo -e "Dockerfile:\t ${DOCKERFILE}"
+echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
+echo -e "Volumename:\t ${VOLUMENAME}"
+echo -e "arch:\t\t ${ARCH}"
+echo -e "Platform:\t ${PLATFORM}"
+echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
-if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
- echo "Volume already exists"
- echo
+if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
+ echo -e "Volume already exists\n"
else
echo -n "createing docker volume "
- docker volume create "${volumename}"
+ docker volume create "${VOLUMENAME}"
fi
# Build Container
docker build \
- --platform="${platform}" \
- --tag="${invokeai_tag}" \
- --build-arg="PIP_REQUIREMENTS=${pip_requirements}" \
- --file="${dockerfile}" \
+ --platform="${PLATFORM}" \
+ --tag="${INVOKEAI_TAG}" \
+ --build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
+ --file="${DOCKERFILE}" \
.
-
-docker run \
- --rm \
- --platform="$platform" \
- --name="$project_name" \
- --hostname="$project_name" \
- --mount="source=$volumename,target=/data" \
- --mount="type=bind,source=$HOME/.huggingface,target=/root/.huggingface" \
- --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
- --entrypoint="python3" \
- "${invokeai_tag}" \
- scripts/configure_invokeai.py --yes
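
The script is still configured through environment variables rather than flags, now using the upper-cased names shared with `docker-build/env.sh`. A hedged example of overriding them (the architecture and requirements file are illustrative and must match your host and the files under `environments-and-requirements/`):

```bash
# Build with non-default settings; run from the repository root
env ARCH=aarch64 CONTAINER_FLAVOR=cuda \
    PIP_REQUIREMENTS=requirements-lin-cuda.txt \
    ./docker-build/build.sh
```
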
diff --git a/docker-build/env.sh b/docker-build/env.sh
index 76d4127ec1..a9021b484d 100644
--- a/docker-build/env.sh
+++ b/docker-build/env.sh
@@ -1,15 +1,9 @@
#!/usr/bin/env bash
-project_name=${PROJECT_NAME:-invokeai}
-volumename=${VOLUMENAME:-${project_name}_data}
-arch=${ARCH:-x86_64}
-platform=${PLATFORM:-Linux/${arch}}
-invokeai_tag=${INVOKEAI_TAG:-${project_name}:${arch}}
-gpus=${GPU_FLAGS:+--gpus=${GPU_FLAGS}}
-
-export project_name
-export volumename
-export arch
-export platform
-export invokeai_tag
-export gpus
+# Variables shared by build.sh and run.sh
+REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
+VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
+ARCH=${ARCH:-$(arch)}
+PLATFORM=${PLATFORM:-Linux/${ARCH}}
+CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
+INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-latest}
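
Note that `${REPOSITORY_NAME,,}` is a bash 4+ lowercase expansion, so these scripts rely on bash rather than a plain POSIX shell. A small sketch of how the derived values resolve on a typical checkout (exact output depends on the clone's directory name and the host CPU):

```bash
# Inspect the values that build.sh and run.sh will use
source ./docker-build/env.sh
echo "volume:   ${VOLUMENAME}"    # e.g. invokeai_data
echo "platform: ${PLATFORM}"      # e.g. Linux/x86_64
echo "tag:      ${INVOKEAI_TAG}"  # e.g. invokeai-cuda:latest
```
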
diff --git a/docker-build/run.sh b/docker-build/run.sh
index d2f232d6fa..b7089fccd2 100755
--- a/docker-build/run.sh
+++ b/docker-build/run.sh
@@ -1,21 +1,31 @@
#!/usr/bin/env bash
set -e
-source ./docker-build/env.sh || echo "please run from repository root" || exit 1
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
+# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
+
+source ./docker-build/env.sh \
+    || { echo "please run from repository root"; exit 1; }
+
+# check if HUGGINGFACE_TOKEN is available
+# You must have accepted the terms of use for required models
+HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}
echo -e "You are using these values:\n"
-echo -e "volumename:\t ${volumename}"
-echo -e "invokeai_tag:\t ${invokeai_tag}\n"
+echo -e "Volumename:\t ${VOLUMENAME}"
+echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
docker run \
--interactive \
--tty \
--rm \
- --platform="$platform" \
- --name="$project_name" \
- --hostname="$project_name" \
- --mount="source=$volumename,target=/data" \
+ --platform="$PLATFORM" \
+ --name="${REPOSITORY_NAME,,}" \
+ --hostname="${REPOSITORY_NAME,,}" \
+ --mount="source=$VOLUMENAME,target=/data" \
+ --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
--publish=9090:9090 \
--cap-add=sys_nice \
- $gpus \
- "$invokeai_tag" ${1:+$@}
+ ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
+ "$INVOKEAI_TAG" ${1:+$@}
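
A sketch of typical invocations, run from the repository root. The token value is a placeholder; `GPU_FLAGS=all` expands to `--gpus=all`, and any extra arguments replace the image's default CMD and are passed to `scripts/invoke.py`:

```bash
# Launch the web UI (the image's default CMD) with GPU access
HUGGINGFACE_TOKEN="hf_your_token_here" GPU_FLAGS=all ./docker-build/run.sh

# Override the default arguments to invoke.py
HUGGINGFACE_TOKEN="hf_your_token_here" ./docker-build/run.sh --web --host=0.0.0.0 --precision=float32
```
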
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index f9aa033d30..a14c123008 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -4,180 +4,275 @@ title: Changelog
# :octicons-log-16: **Changelog**
+## v2.2.4 (11 December 2022)
+
+**the `invokeai` directory**
+
+Previously there were two directories to worry about, the directory that
+contained the InvokeAI source code and the launcher scripts, and the `invokeai`
+directory that contained the models files, embeddings, configuration and
+outputs. With the 2.2.4 release, this dual system is done away with, and
+everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
+live in a directory named `invokeai`. By default this directory is located in
+your home directory (e.g. `\Users\yourname` on Windows), but you can select
+where it goes at install time.
+
+After installation, you can delete the install directory (the one that the zip
+file creates when it unpacks). Do **not** delete or move the `invokeai`
+directory!
+
+**Initialization file `invokeai/invokeai.init`**
+
+You can place frequently-used startup options in this file, such as the default
+number of steps or your preferred sampler. To keep everything in one place, this
+file has now been moved into the `invokeai` directory and is named
+`invokeai.init`.
+
+**To update from Version 2.2.3**
+
+The easiest route is to download and unpack one of the 2.2.4 installer files.
+When it asks you for the location of the `invokeai` runtime directory, respond
+with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
+`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
+and answer "Y" when asked if you want to reuse the directory.
+
+The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
+does not know about the new directory layout and won't be fully functional.
+
+**To update to 2.2.5 (and beyond) there's now an update path**
+
+As they become available, you can update to more recent versions of InvokeAI
+using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
+Running it without any arguments will install the most recent version of
+InvokeAI. Alternatively, you can install a specific release by running the `update.sh`
+script with an argument in the command shell. This syntax accepts the path to
+the desired release's zip file, which you can find by clicking on the green
+"Code" button on this repository's home page.
+
+**Other 2.2.4 Improvements**
+
+- Fix InvokeAI GUI initialization by @addianto in #1687
+- fix link in documentation by @lstein in #1728
+- Fix broken link by @ShawnZhong in #1736
+- Remove reference to binary installer by @lstein in #1731
+- documentation fixes for 2.2.3 by @lstein in #1740
+- Modify installer links to point closer to the source installer by @ebr in
+ #1745
+- add documentation warning about 1650/60 cards by @lstein in #1753
+- Fix Linux source URL in installation docs by @andybearman in #1756
+- Make install instructions discoverable in readme by @damian0815 in #1752
+- typo fix by @ofirkris in #1755
+- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
+- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
+ in #1765
+- stability and usage improvements to binary & source installers by @lstein in
+ #1760
+- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
+- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
+- invoke script cds to its location before running by @lstein in #1805
+- Make PaperCut and VoxelArt models load again by @lstein in #1730
+- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
+ #1817
+- Clean up readme by @hipsterusername in #1820
+- Optimized Docker build with support for external working directory by @ebr in
+ #1544
+- disable pushing the cloud container by @mauwii in #1831
+- Fix docker push github action and expand with additional metadata by @ebr in
+ #1837
+- Fix Broken Link To Notebook by @VedantMadane in #1821
+- Account for flat models by @spezialspezial in #1766
+- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
+- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
+ @SammCheese in #1848
+- Make force free GPU memory work in img2img by @addianto in #1844
+- New installer by @lstein
+
+## v2.2.3 (2 December 2022)
+
+!!! Note
+
+ This point release removes references to the binary installer from the
+ installation guide. The binary installer is not stable at the current
+ time. First time users are encouraged to use the "source" installer as
+ described in [Installing InvokeAI with the Source Installer](installation/INSTALL_SOURCE.md)
+
+With InvokeAI 2.2, this project now provides enthusiasts and professionals a
+robust workflow solution for creating AI-generated and human facilitated
+compositions. Additional enhancements have been made as well, improving safety,
+ease of use, and installation.
+
+Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
+512x768 image (and less for smaller images), and is compatible with
+Windows/Linux/Mac (M1 & M2).
+
+You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
+introduces the main WebUI enhancement for version 2.2 -
+[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
+biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
+potential for users to create and iterate on their creations. The following
+sections describe what's new for InvokeAI.
+
+## v2.2.2 (30 November 2022)
+
+!!! note
+
+    The binary installer is not ready for prime time. First time users are recommended to install via the "source" installer accessible through the links at the bottom of this page.
+
+With InvokeAI 2.2, this project now provides enthusiasts and professionals a
+robust workflow solution for creating AI-generated and human facilitated
+compositions. Additional enhancements have been made as well, improving safety,
+ease of use, and installation.
+
+Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
+512x768 image (and less for smaller images), and is compatible with
+Windows/Linux/Mac (M1 & M2).
+
+You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
+introduces the main WebUI enhancement for version 2.2 -
+[The Unified Canvas](https://invoke-ai.github.io/InvokeAI/features/UNIFIED_CANVAS/).
+This new workflow is the biggest enhancement added to the WebUI to date, and
+unlocks a stunning amount of potential for users to create and iterate on their
+creations. The following sections describe what's new for InvokeAI.
+
+## v2.2.0 (2 December 2022)
+
+With InvokeAI 2.2, this project now provides enthusiasts and professionals a
+robust workflow solution for creating AI-generated and human facilitated
+compositions. Additional enhancements have been made as well, improving safety,
+ease of use, and installation.
+
+Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
+512x768 image (and less for smaller images), and is compatible with
+Windows/Linux/Mac (M1 & M2).
+
+You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
+introduces the main WebUI enhancement for version 2.2 -
+[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
+biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
+potential for users to create and iterate on their creations. The following
+sections describe what's new for InvokeAI.
+
+## v2.1.3 (13 November 2022)
+
+- A choice of installer scripts that automate installation and configuration.
+ See
+ [Installation](installation/index.md).
+- A streamlined manual installation process that works for both Conda and
+ PIP-only installs. See
+ [Manual Installation](installation/INSTALL_MANUAL.md).
+- The ability to save frequently-used startup options (model to load, steps,
+ sampler, etc) in a `.invokeai` file. See
+ [Client](features/CLI.md)
+- Support for AMD GPU cards (non-CUDA) on Linux machines.
+- Multiple bugs and edge cases squashed.
+
## v2.1.0 (2 November 2022)
-- update mac instructions to use invokeai for env name by @willwillems in
- https://github.com/invoke-ai/InvokeAI/pull/1030
-- Update .gitignore by @blessedcoolant in
- https://github.com/invoke-ai/InvokeAI/pull/1040
-- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
- missing after merge by @skurovec in
- https://github.com/invoke-ai/InvokeAI/pull/1056
-- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
- https://github.com/invoke-ai/InvokeAI/pull/1060
-- Print out the device type which is used by @manzke in
- https://github.com/invoke-ai/InvokeAI/pull/1073
-- Hires Addition by @hipsterusername in
- https://github.com/invoke-ai/InvokeAI/pull/1063
+- update mac instructions to use invokeai for env name by @willwillems in #1030
+- Update .gitignore by @blessedcoolant in #1040
+- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
+- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
+- Print out the device type which is used by @manzke in #1073
+- Hires Addition by @hipsterusername in #1063
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
- @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
+ @skurovec in #1081
- Forward dream.py to invoke.py using the same interpreter, add deprecation
- warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
-- fix noisy images at high step counts by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1086
-- Generalize facetool strength argument by @db3000 in
- https://github.com/invoke-ai/InvokeAI/pull/1078
+ warning by @db3000 in #1077
+- fix noisy images at high step counts by @lstein in #1086
+- Generalize facetool strength argument by @db3000 in #1078
- Enable fast switching among models at the invoke> command line by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1066
-- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
- https://github.com/invoke-ai/InvokeAI/pull/1095
-- Update generate.py by @unreleased in
- https://github.com/invoke-ai/InvokeAI/pull/1109
-- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in
- https://github.com/invoke-ai/InvokeAI/pull/1125
-- Fixed documentation typos and resolved merge conflicts by @rupeshs in
- https://github.com/invoke-ai/InvokeAI/pull/1123
-- Fix broken doc links, fix malaprop in the project subtitle by @majick in
- https://github.com/invoke-ai/InvokeAI/pull/1131
-- Only output facetool parameters if enhancing faces by @db3000 in
- https://github.com/invoke-ai/InvokeAI/pull/1119
+ #1066
+- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
+- Update generate.py by @unreleased in #1109
+- Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in #1125
+- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
+- Fix broken doc links, fix malaprop in the project subtitle by @majick in #1131
+- Only output facetool parameters if enhancing faces by @db3000 in #1119
- Update gitignore to ignore codeformer weights at new location by
- @spezialspezial in https://github.com/invoke-ai/InvokeAI/pull/1136
-- fix links to point to invoke-ai.github.io #1117 by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1143
-- Rework-mkdocs by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1144
+ @spezialspezial in #1136
+- fix links to point to invoke-ai.github.io #1117 by @mauwii in #1143
+- Rework-mkdocs by @mauwii in #1144
- add option to CLI and pngwriter that allows user to set PNG compression level
- by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
-- Fix img2img DDIM index out of bound by @wfng92 in
- https://github.com/invoke-ai/InvokeAI/pull/1137
-- Fix gh actions by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1128
-- update mac instructions to use invokeai for env name by @willwillems in
- https://github.com/invoke-ai/InvokeAI/pull/1030
-- Update .gitignore by @blessedcoolant in
- https://github.com/invoke-ai/InvokeAI/pull/1040
-- reintroduce fix for m1 from https://github.com/invoke-ai/InvokeAI/pull/579
- missing after merge by @skurovec in
- https://github.com/invoke-ai/InvokeAI/pull/1056
-- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in
- https://github.com/invoke-ai/InvokeAI/pull/1060
-- Print out the device type which is used by @manzke in
- https://github.com/invoke-ai/InvokeAI/pull/1073
-- Hires Addition by @hipsterusername in
- https://github.com/invoke-ai/InvokeAI/pull/1063
+ by @lstein in #1127
+- Fix img2img DDIM index out of bound by @wfng92 in #1137
+- Fix gh actions by @mauwii in #1128
+- update mac instructions to use invokeai for env name by @willwillems in #1030
+- Update .gitignore by @blessedcoolant in #1040
+- reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
+- Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
+- Print out the device type which is used by @manzke in #1073
+- Hires Addition by @hipsterusername in #1063
- fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
- @skurovec in https://github.com/invoke-ai/InvokeAI/pull/1081
+ @skurovec in #1081
- Forward dream.py to invoke.py using the same interpreter, add deprecation
- warning by @db3000 in https://github.com/invoke-ai/InvokeAI/pull/1077
-- fix noisy images at high step counts by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1086
-- Generalize facetool strength argument by @db3000 in
- https://github.com/invoke-ai/InvokeAI/pull/1078
+ warning by @db3000 in #1077
+- fix noisy images at high step counts by @lstein in #1086
+- Generalize facetool strength argument by @db3000 in #1078
- Enable fast switching among models at the invoke> command line by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1066
-- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in
- https://github.com/invoke-ai/InvokeAI/pull/1095
-- Fixed documentation typos and resolved merge conflicts by @rupeshs in
- https://github.com/invoke-ai/InvokeAI/pull/1123
-- Only output facetool parameters if enhancing faces by @db3000 in
- https://github.com/invoke-ai/InvokeAI/pull/1119
+ #1066
+- Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
+- Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
+- Only output facetool parameters if enhancing faces by @db3000 in #1119
- add option to CLI and pngwriter that allows user to set PNG compression level
- by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1127
-- Fix img2img DDIM index out of bound by @wfng92 in
- https://github.com/invoke-ai/InvokeAI/pull/1137
-- Add text prompt to inpaint mask support by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1133
+ by @lstein in #1127
+- Fix img2img DDIM index out of bound by @wfng92 in #1137
+- Add text prompt to inpaint mask support by @lstein in #1133
- Respect http[s] protocol when making socket.io middleware by @damian0815 in
- https://github.com/invoke-ai/InvokeAI/pull/976
-- WebUI: Adds Codeformer support by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1151
-- Skips normalizing prompts for web UI metadata by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1165
-- Add Asymmetric Tiling by @carson-katri in
- https://github.com/invoke-ai/InvokeAI/pull/1132
-- Web UI: Increases max CFG Scale to 200 by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1172
+ #976
+- WebUI: Adds Codeformer support by @psychedelicious in #1151
+- Skips normalizing prompts for web UI metadata by @psychedelicious in #1165
+- Add Asymmetric Tiling by @carson-katri in #1132
+- Web UI: Increases max CFG Scale to 200 by @psychedelicious in #1172
- Corrects color channels in face restoration; Fixes #1167 by @psychedelicious
- in https://github.com/invoke-ai/InvokeAI/pull/1175
+ in #1175
- Flips channels using array slicing instead of using OpenCV by @psychedelicious
- in https://github.com/invoke-ai/InvokeAI/pull/1178
-- Fix typo in docs: s/Formally/Formerly by @noodlebox in
- https://github.com/invoke-ai/InvokeAI/pull/1176
-- fix clipseg loading problems by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1177
-- Correct color channels in upscale using array slicing by @wfng92 in
- https://github.com/invoke-ai/InvokeAI/pull/1181
+ in #1178
+- Fix typo in docs: s/Formally/Formerly by @noodlebox in #1176
+- fix clipseg loading problems by @lstein in #1177
+- Correct color channels in upscale using array slicing by @wfng92 in #1181
- Web UI: Filters existing images when adding new images; Fixes #1085 by
- @psychedelicious in https://github.com/invoke-ai/InvokeAI/pull/1171
-- fix a number of bugs in textual inversion by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1190
-- Improve !fetch, add !replay command by @ArDiouscuros in
- https://github.com/invoke-ai/InvokeAI/pull/882
-- Fix generation of image with s>1000 by @holstvoogd in
- https://github.com/invoke-ai/InvokeAI/pull/951
-- Web UI: Gallery improvements by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1198
-- Update CLI.md by @krummrey in https://github.com/invoke-ai/InvokeAI/pull/1211
-- outcropping improvements by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1207
-- add support for loading VAE autoencoders by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1216
-- remove duplicate fix_func for MPS by @wfng92 in
- https://github.com/invoke-ai/InvokeAI/pull/1210
-- Metadata storage and retrieval fixes by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1204
-- nix: add shell.nix file by @Cloudef in
- https://github.com/invoke-ai/InvokeAI/pull/1170
-- Web UI: Changes vite dist asset paths to relative by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1185
-- Web UI: Removes isDisabled from PromptInput by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1187
+ @psychedelicious in #1171
+- fix a number of bugs in textual inversion by @lstein in #1190
+- Improve !fetch, add !replay command by @ArDiouscuros in #882
+- Fix generation of image with s>1000 by @holstvoogd in #951
+- Web UI: Gallery improvements by @psychedelicious in #1198
+- Update CLI.md by @krummrey in #1211
+- outcropping improvements by @lstein in #1207
+- add support for loading VAE autoencoders by @lstein in #1216
+- remove duplicate fix_func for MPS by @wfng92 in #1210
+- Metadata storage and retrieval fixes by @lstein in #1204
+- nix: add shell.nix file by @Cloudef in #1170
+- Web UI: Changes vite dist asset paths to relative by @psychedelicious in #1185
+- Web UI: Removes isDisabled from PromptInput by @psychedelicious in #1187
- Allow user to generate images with initial noise as on M1 / mps system by
- @ArDiouscuros in https://github.com/invoke-ai/InvokeAI/pull/981
-- feat: adding filename format template by @plucked in
- https://github.com/invoke-ai/InvokeAI/pull/968
-- Web UI: Fixes broken bundle by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1242
-- Support runwayML custom inpainting model by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1243
-- Update IMG2IMG.md by @talitore in
- https://github.com/invoke-ai/InvokeAI/pull/1262
+ @ArDiouscuros in #981
+- feat: adding filename format template by @plucked in #968
+- Web UI: Fixes broken bundle by @psychedelicious in #1242
+- Support runwayML custom inpainting model by @lstein in #1243
+- Update IMG2IMG.md by @talitore in #1262
- New dockerfile - including a build- and a run- script as well as a GH-Action
- by @mauwii in https://github.com/invoke-ai/InvokeAI/pull/1233
+ by @mauwii in #1233
- cut over from karras to model noise schedule for higher steps by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1222
-- Prompt tweaks by @lstein in https://github.com/invoke-ai/InvokeAI/pull/1268
-- Outpainting implementation by @Kyle0654 in
- https://github.com/invoke-ai/InvokeAI/pull/1251
-- fixing aspect ratio on hires by @tjennings in
- https://github.com/invoke-ai/InvokeAI/pull/1249
-- Fix-build-container-action by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1274
-- handle all unicode characters by @damian0815 in
- https://github.com/invoke-ai/InvokeAI/pull/1276
-- adds models.user.yml to .gitignore by @JakeHL in
- https://github.com/invoke-ai/InvokeAI/pull/1281
-- remove debug branch, set fail-fast to false by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1284
-- Protect-secrets-on-pr by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1285
-- Web UI: Adds initial inpainting implementation by @psychedelicious in
- https://github.com/invoke-ai/InvokeAI/pull/1225
-- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1289
-- Use proper authentication to download model by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1287
-- Prevent indexing error for mode RGB by @spezialspezial in
- https://github.com/invoke-ai/InvokeAI/pull/1294
+ #1222
+- Prompt tweaks by @lstein in #1268
+- Outpainting implementation by @Kyle0654 in #1251
+- fixing aspect ratio on hires by @tjennings in #1249
+- Fix-build-container-action by @mauwii in #1274
+- handle all unicode characters by @damian0815 in #1276
+- adds models.user.yml to .gitignore by @JakeHL in #1281
+- remove debug branch, set fail-fast to false by @mauwii in #1284
+- Protect-secrets-on-pr by @mauwii in #1285
+- Web UI: Adds initial inpainting implementation by @psychedelicious in #1225
+- fix environment-mac.yml - tested on x64 and arm64 by @mauwii in #1289
+- Use proper authentication to download model by @mauwii in #1287
+- Prevent indexing error for mode RGB by @spezialspezial in #1294
- Integrate sd-v1-5 model into test matrix (easily expandable), remove
- unecesarry caches by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1293
-- add --no-interactive to configure_invokeai step by @mauwii in
- https://github.com/invoke-ai/InvokeAI/pull/1302
+ unecesarry caches by @mauwii in #1293
+- add --no-interactive to configure_invokeai step by @mauwii in #1302
- 1-click installer and updater. Uses micromamba to install git and conda into a
contained environment (if necessary) before running the normal installation
- script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
-- configure_invokeai.py script downloads the weight files by @lstein in
- https://github.com/invoke-ai/InvokeAI/pull/1290
+ script by @cmdr2 in #1253
+- configure_invokeai.py script downloads the weight files by @lstein in #1290
## v2.0.1 (13 October 2022)
diff --git a/docs/features/CONCEPTS.md b/docs/features/CONCEPTS.md
index ebb0a59706..f92e52f9e0 100644
--- a/docs/features/CONCEPTS.md
+++ b/docs/features/CONCEPTS.md
@@ -37,8 +37,9 @@ generated using the command-line client and the Stable Diffusion 1.5 model:
You can also combine styles and concepts:
## Using a Hugging Face Concept
@@ -49,24 +50,26 @@ find out what each concept is for, you can browse the
look at examples of what each concept produces.
When you have an idea of a concept you wish to try, go to the command-line
-client (CLI) and type a "<" character and the beginning of the Hugging Face
-concept name you wish to load. Press the Tab key, and the CLI will show you all
-matching concepts. You can also type "<" and Tab to get a listing of all ~800
-concepts, but be prepared to scroll up to see them all! If there is more than
-one match you can continue to type and Tab until the concept is completed.
+client (CLI) and type a `<` character and the beginning of the Hugging Face
+concept name you wish to load. Press ++tab++, and the CLI will show you all
+matching concepts. You can also type `<` and hit ++tab++ to get a listing of all
+~800 concepts, but be prepared to scroll up to see them all! If there is more
+than one match you can continue to type and ++tab++ until the concept is
+completed.
-For example if you type "<x" and Tab, you'll be prompted with the
-completions:
+!!! example
-```
-
-```
+    if you type in `<x` and hit ++tab++, you'll be prompted with the
+    completions:
+
+    ```
+    ```
-Finish your prompt and generate as usual. You may include multiple concept terms
-in the prompt.
+ Now type `id` and press ++tab++. It will be autocompleted to ``
+ because this is a unique match.
+
+ Finish your prompt and generate as usual. You may include multiple concept terms
+ in the prompt.
If you have never used this concept before, you will see a message that the TI
model is being downloaded and installed. After this, the concept will be saved
@@ -75,10 +78,10 @@ locally (in the `models/sd-concepts-library` directory) for future use.
Several steps happen during downloading and installation, including a scan of
the file for malicious code. Should any errors occur, you will be warned and the
concept will fail to load. Generation will then continue treating the trigger
-term as a normal string of characters (e.g. as literal "<ghibli-face>").
+term as a normal string of characters (e.g. as literal `<ghibli-face>`).
-Currently auto-installation of concepts is a feature only available on the
-command-line client. Support for the WebUI is a work in progress.
+You can also use `` in the WebGUI's prompt textbox. There is no
+autocompletion at this time.
## Installing your Own TI Files
diff --git a/docs/features/INPAINTING.md b/docs/features/INPAINTING.md
index e22fbd08e7..f3a879b190 100644
--- a/docs/features/INPAINTING.md
+++ b/docs/features/INPAINTING.md
@@ -158,7 +158,7 @@ when filling in missing regions. It has an almost uncanny ability to blend the
new regions with existing ones in a semantically coherent way.
To install the inpainting model, follow the
-[instructions](../installation/INSTALLING_MODELS.md) for installing a new model.
+[instructions](../installation/050_INSTALLING_MODELS.md) for installing a new model.
You may use either the CLI (`invoke.py` script) or directly edit the
`configs/models.yaml` configuration file to do this. The main thing to watch out
for is that the model `config` option must be set up to use
diff --git a/docs/index.md b/docs/index.md
index a5a217ac26..3c5bd3904b 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -6,15 +6,14 @@ title: Home
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
```bash
- pip install -r requirements-mkdocs.txt
+ pip install -r docs/requirements-mkdocs.txt
mkdocs serve
```
-->
+
-# ^^**InvokeAI: A Stable Diffusion Toolkit**^^ :tools: Formerly known as lstein/stable-diffusion
-
-[![project logo](assets/logo.png)](https://github.com/invoke-ai/InvokeAI)
+[![project logo](assets/invoke_ai_banner.png)](https://github.com/invoke-ai/InvokeAI)
[![discord badge]][discord link]
@@ -70,7 +69,11 @@ image-to-image generator. It provides a streamlined process with various new
features and options to aid the image generation process. It runs on Windows,
Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
-**Quick links**: [Discord Server] [Code and Downloads] [Bug Reports] [Discussion, Ideas & Q&A]
+**Quick links**: [Discord Server]
+[Code and Downloads] [Bug Reports] [Discussion, Ideas &
+Q&A]
@@ -80,20 +83,19 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB or RAM.
## :octicons-package-dependencies-24: Installation
-This fork is supported across Linux, Windows and Macintosh. Linux
-users can use either an Nvidia-based card (with CUDA support) or an
-AMD card (using the ROCm driver).
+This fork is supported across Linux, Windows and Macintosh. Linux users can use
+either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
+driver).
-First time users, please see [Automated
-Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
-getting InvokeAI up and running on your system. For alternative
-installation and upgrade instructions, please see: [InvokeAI
-Installation Overview](installation/)
+First time users, please see
+[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
+getting InvokeAI up and running on your system. For alternative installation and
+upgrade instructions, please see:
+[InvokeAI Installation Overview](installation/)
-Linux users who wish to make use of the PyPatchMatch inpainting
-functions will need to perform a bit of extra work to enable this
-module. Instructions can be found at [Installing
-PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
+Linux users who wish to make use of the PyPatchMatch inpainting functions will
+need to perform a bit of extra work to enable this module. Instructions can be
+found at [Installing PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).
## :fontawesome-solid-computer: Hardware Requirements
@@ -102,12 +104,13 @@ PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
You will need one of the following:
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
-- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
+- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
+ only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
-We do **not recommend** the following video cards due to issues with
-their running in half-precision mode and having insufficient VRAM to
-render 512x512 images in full-precision mode:
+We do **not recommend** the following video cards due to issues with their
+running in half-precision mode and having insufficient VRAM to render 512x512
+images in full-precision mode:
- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
@@ -131,123 +134,115 @@ render 512x512 images in full-precision mode:
```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
```
+
## :octicons-gift-24: InvokeAI Features
-- [The InvokeAI Web Interface](features/WEB.md)
- - [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- - [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
+- [The InvokeAI Web Interface](features/WEB.md) -
+[WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
+[WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
-- [The Command Line Interace](features/CLI.md)
- - [Image2Image](features/IMG2IMG.md)
- - [Inpainting](features/INPAINTING.md)
- - [Outpainting](features/OUTPAINTING.md)
- - [Adding custom styles and subjects](features/CONCEPTS.md)
- - [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
+- [The Command Line Interface](features/CLI.md) -
+[Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
+[Outpainting](features/OUTPAINTING.md) -
+[Adding custom styles and subjects](features/CONCEPTS.md) -
+[Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Generating Variations](features/VARIATIONS.md)
- [Prompt Engineering](features/PROMPTS.md)
- Miscellaneous
- - [NSFW Checker](features/NSFW.md)
- - [Embiggen upscaling](features/EMBIGGEN.md)
- - [Other](features/OTHER.md)
+ - [NSFW Checker](features/NSFW.md)
+ - [Embiggen upscaling](features/EMBIGGEN.md)
+ - [Other](features/OTHER.md)
## :octicons-log-16: Latest Changes
-### v2.1.3 (13 November 2022)
+### v2.2.4 (11 December 2022)
-- A choice of installer scripts that automate installation and configuration. See [Installation](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/installation/INSTALL.md).
-- A streamlined manual installation process that works for both Conda and PIP-only installs. See [Manual Installation](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/installation/INSTALL_MANUAL.md).
-- The ability to save frequently-used startup options (model to load, steps, sampler, etc) in a `.invokeai` file. See [Client](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/features/CLI.md)
-- Support for AMD GPU cards (non-CUDA) on Linux machines.
-- Multiple bugs and edge cases squashed.
+#### the `invokeai` directory
-### v2.1.0 (2 November 2022)
+Previously there were two directories to worry about, the directory that
+contained the InvokeAI source code and the launcher scripts, and the `invokeai`
+directory that contained the models files, embeddings, configuration and
+outputs. With the 2.2.4 release, this dual system is done away with, and
+everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
+live in a directory named `invokeai`. By default this directory is located in
+your home directory (e.g. `\Users\yourname` on Windows), but you can select
+where it goes at install time.
-- [Inpainting](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
- support in the WebGUI
-- Greatly improved navigation and user experience in the
- [WebGUI](https://invoke-ai.github.io/InvokeAI/features/WEB/)
-- The prompt syntax has been enhanced with
- [prompt weighting, cross-attention and prompt merging](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/).
-- You can now load
- [multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
- without leaving the CLI.
-- The installation process (via `scripts/configure_invokeai.py`) now lets you select
- among several popular
- [Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
- and downloads and installs them on your behalf. Among other models, this
- script will install the current Stable Diffusion 1.5 model as well as a
- StabilityAI variable autoencoder (VAE) which improves face generation.
-- Tired of struggling with photoeditors to get the masked region of for
- inpainting just right? Let the AI make the mask for you using
- [text masking](https://docs.google.com/presentation/d/1pWoY510hCVjz0M6X9CBbTznZgW2W5BYNKrmZm7B45q8/edit#slide=id.p).
- This feature allows you to specify the part of the image to paint over using
- just English-language phrases.
-- Tired of seeing the head of your subjects cropped off? Uncrop them in the CLI
- with the
- [outcrop feature](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/#outcrop).
-- Tired of seeing your subject's bodies duplicated or mangled when generating
- larger-dimension images? Check out the `--hires` option in the CLI, or select
- the corresponding toggle in the WebGUI.
-- We now support textual inversion and fine-tune .bin styles and subjects from
- the Hugging Face archive of
- [SD Concepts](https://huggingface.co/sd-concepts-library). Load the .bin file
- using the `--embedding_path` option. (The next version will support merging
- and loading of multiple simultaneous models).
-- ...
+After installation, you can delete the install directory (the one that the zip
+file creates when it unpacks). Do **not** delete or move the `invokeai`
+directory!
-### v2.0.1 (13 October 2022)
+##### Initialization file `invokeai/invokeai.init`
-- fix noisy images at high step count when using k\* samplers
-- dream.py script now calls invoke.py module directly rather than via a new
- python process (which could break the environment)
+You can place frequently-used startup options in this file, such as the default
+number of steps or your preferred sampler. To keep everything in one place, this
+file has now been moved into the `invokeai` directory and is named
+`invokeai.init`.
-### v2.0.0 (9 October 2022)
+#### To update from Version 2.2.3
-- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
- backward compatibility.
-- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
-- Support for
- inpainting
- and
- outpainting
-- img2img runs on all k\* samplers
-- Support for
- negative
- prompts
-- Support for CodeFormer face reconstruction
-- Support for Textual Inversion on Macintoshes
-- Support in both WebGUI and CLI for
- post-processing
- of previously-generated images using facial reconstruction, ESRGAN
- upscaling, outcropping (similar to DALL-E infinite canvas), and "embiggen"
- upscaling. See the `!fix` command.
-- New `--hires` option on `invoke>` line allows
- larger
- images to be created without duplicating elements, at the cost of some
- performance.
-- New `--perlin` and `--threshold` options allow you to add and control
- variation during image generation (see
- Thresholding
- and Perlin Noise Initialization
-- Extensive metadata now written into PNG files, allowing reliable regeneration
- of images and tweaking of previous settings.
-- Command-line completion in `invoke.py` now works on Windows, Linux and Mac
- platforms.
-- Improved
- command-line
- completion behavior. New commands added:
- - List command-line history with `!history`
- - Search command-line history with `!search`
- - Clear history with `!clear`
-- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
- configure. To switch away from auto use the new flag like
- `--precision=float32`.
+The easiest route is to download and unpack one of the 2.2.4 installer files.
+When it asks you for the location of the `invokeai` runtime directory, respond
+with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
+`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
+and answer "Y" when asked if you want to reuse the directory.
+
+The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
+does not know about the new directory layout and won't be fully functional.
+
+#### To update to 2.2.5 (and beyond) there's now an update path.
+
+As they become available, you can update to more recent versions of InvokeAI
+using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
+Running it without any arguments will install the most recent version of
+InvokeAI. Alternatively, you can install a specific release by running the `update.sh`
+script with an argument in the command shell. This syntax accepts the path to
+the desired release's zip file, which you can find by clicking on the green
+"Code" button on this repository's home page.
+
+#### Other 2.2.4 Improvements
+
+- Fix InvokeAI GUI initialization by @addianto in #1687
+- fix link in documentation by @lstein in #1728
+- Fix broken link by @ShawnZhong in #1736
+- Remove reference to binary installer by @lstein in #1731
+- documentation fixes for 2.2.3 by @lstein in #1740
+- Modify installer links to point closer to the source installer by @ebr in
+ #1745
+- add documentation warning about 1650/60 cards by @lstein in #1753
+- Fix Linux source URL in installation docs by @andybearman in #1756
+- Make install instructions discoverable in readme by @damian0815 in #1752
+- typo fix by @ofirkris in #1755
+- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
+- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
+ in #1765
+- stability and usage improvements to binary & source installers by @lstein in
+ #1760
+- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
+- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
+- invoke script cds to its location before running by @lstein in #1805
+- Make PaperCut and VoxelArt models load again by @lstein in #1730
+- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
+ #1817
+- Clean up readme by @hipsterusername in #1820
+- Optimized Docker build with support for external working directory by @ebr in
+ #1544
+- disable pushing the cloud container by @mauwii in #1831
+- Fix docker push github action and expand with additional metadata by @ebr in
+ #1837
+- Fix Broken Link To Notebook by @VedantMadane in #1821
+- Account for flat models by @spezialspezial in #1766
+- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
+- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
+ @SammCheese in #1848
+- Make force free GPU memory work in img2img by @addianto in #1844
+- New installer by @lstein
For older changelogs, please visit the
-**[CHANGELOG](CHANGELOG/#v114-11-september-2022)**.
+**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting
diff --git a/docs/installation/010_INSTALL_AUTOMATED.md b/docs/installation/010_INSTALL_AUTOMATED.md
new file mode 100644
index 0000000000..8b1bc97be7
--- /dev/null
+++ b/docs/installation/010_INSTALL_AUTOMATED.md
@@ -0,0 +1,306 @@
+---
+title: Installing with the Automated Installer
+---
+
+# InvokeAI Automated Installation
+
+## Introduction
+
+The automated installer is a shell script that attempts to automate every step
+needed to install and run InvokeAI on a stock computer running recent versions
+of Linux, MacOS or Windows. It will leave you with a stable version of
+InvokeAI, with the option to upgrade to experimental versions later.
+
+## Walk through
+
+1. Make sure that your system meets the
+ [hardware requirements](../index.md#hardware-requirements) and has the
+ appropriate GPU drivers installed. In particular, if you are a Linux user
+ with an AMD GPU installed, you may need to install the
+ [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
+
+ !!! info "Required Space"
+
+ Installation requires roughly 18G of free disk space to load the libraries and
+ recommended model weights files.
+
+2. Check that your system has an up-to-date Python installed. To do this, open
+ up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
+ "Powershell" on Windows) and type `python --version`. If Python is
+ installed, it will print out the version number. If it is version `3.9.1` or
+ higher, you meet requirements.
+
+ !!! warning "If you see an older version, or get a command not found error"
+
+ Go to [Python Downloads](https://www.python.org/downloads/) and
+ download the appropriate installer package for your platform. We recommend
+ [Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
+ which has been extensively tested with InvokeAI.
+
+ !!! warning "At this time we do not recommend Python 3.11"
+
+ _Please select your platform in the section below for platform-specific
+ setup requirements._
+
+ === "Windows users"
+
+ - During the Python configuration process,
+ look out for a checkbox to add Python to your PATH
+ and select it. If the install script complains that it can't
+ find python, then open the Python installer again and choose
+ "Modify" existing installation.
+
+ - Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
+
+ === "Mac users"
+
+ - After installing Python, you may need to run the
+ following command from the Terminal in order to install the Web
+ certificates needed to download model data from https sites. If
+ you see lots of CERTIFICATE ERRORS during the last part of the
+ install, this is the problem, and you can fix it with this command:
+
+ `/Applications/Python\ 3.10/Install\ Certificates.command`
+
+ - You may need to install the Xcode command line tools. These
+ are a set of tools that are needed to run certain applications in a
+ Terminal, including InvokeAI. This package is provided directly by Apple.
+
+ - To install, open a terminal window and run `xcode-select
+ --install`. You will get a macOS system popup guiding you through the
+ install. If you already have them installed, you will instead see some
+ output in the Terminal advising you that the tools are already installed.
+
+ - More information can be found here:
+ https://www.freecodecamp.org/news/install-xcode-command-line-tools/
+
+ === "Linux users"
+
+        For reasons that are not entirely clear, installing the correct version of Python can be a bit of a challenge on Ubuntu, Linux Mint, and other Ubuntu-derived distributions.
+
+ In particular, Ubuntu version 20.04 LTS comes with an old version of Python, does not come with the PIP package manager installed, and to make matters worse, the `python` command points to Python2, not Python3.
+
+ Here is the quick recipe for bringing your system up to date:
+
+ ```
+ sudo apt update
+ sudo apt install python3.9
+ sudo apt install python3-pip
+ cd /usr/bin
+ sudo ln -sf python3.9 python3
+ sudo ln -sf python3 python
+ ```
+
+ You can still access older versions of Python by calling `python2`, `python3.8`,
+ etc.
+
+3. The source installer is distributed in ZIP files. Go to the
+ [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
+ look for a series of files named:
+
+ - [InvokeAI-installer-2.2.4-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-mac.zip)
+ - [InvokeAI-installer-2.2.4-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-windows.zip)
+ - [InvokeAI-installer-2.2.4-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-linux.zip)
+
+ Download the one that is appropriate for your operating system.
+
+4. Unpack the zip file into a convenient directory. This will create a new
+ directory named "InvokeAI-Installer". This example shows how this would look
+ using the `unzip` command-line tool, but you may use any graphical or
+ command-line Zip extractor:
+
+ ```cmd
+ C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
+    Archive: C:\Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
+ creating: InvokeAI-Installer\
+ inflating: InvokeAI-Installer\install.bat
+ inflating: InvokeAI-Installer\readme.txt
+ ...
+ ```
+
+ After successful installation, you can delete the `InvokeAI-Installer`
+ directory.
+
+5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
+ accept the dialog box that asks you if you wish to modify your registry.
+ This activates long filename support on your system and will prevent
+ mysterious errors during installation.
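+
+    If you prefer the command line, you can usually enable the same long-path
+    support from an elevated Command Prompt. This is a sketch of an equivalent
+    command (it assumes the standard Windows long-path registry value, not the
+    literal contents of the `.reg` file):
+
+    ```cmd
+    REM run from an elevated (Administrator) Command Prompt
+    reg add "HKLM\SYSTEM\CurrentControlSet\Control\FileSystem" /v LongPathsEnabled /t REG_DWORD /d 1 /f
+    ```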
+
+6. If you are using a desktop GUI, double-click the installer file. It will be
+ named `install.bat` on Windows systems and `install.sh` on Linux and
+ Macintosh systems.
+
+ On Windows systems you will probably get an "Untrusted Publisher" warning.
+ Click on "More Info" and select "Run Anyway." You trust us, right?
+
+7. Alternatively, from the command line, run the shell script or .bat file:
+
+ ```cmd
+ C:\Documents\Linco> cd InvokeAI-Installer
+    C:\Documents\Linco\InvokeAI-Installer> install.bat
+ ```
+
+8. The script will ask you to choose where to install InvokeAI. Select a
+ directory with at least 18G of free space for a full install. InvokeAI and
+ all its support files will be installed into a new directory named
+ `invokeai` located at the location you specify.
+
+ - The default is to install the `invokeai` directory in your home directory,
+ usually `C:\Users\YourName\invokeai` on Windows systems,
+ `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
+ on Macintoshes, where "YourName" is your login name.
+
+ - The script uses tab autocompletion to suggest directory path completions.
+ Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
+ to suggest completions.
+
+9. Sit back and let the install script work. It will install the third-party
+ libraries needed by InvokeAI, then download the current InvokeAI release and
+ install it.
+
+ Be aware that some of the library download and install steps take a long
+ time. In particular, the `pytorch` package is quite large and often appears
+ to get "stuck" at 99.9%. Have patience and the installation step will
+ eventually resume. However, there are occasions when the library install
+ does legitimately get stuck. If you have been waiting for more than ten
+ minutes and nothing is happening, you can interrupt the script with ^C. You
+ may restart it and it will pick up where it left off.
+
+10. After installation completes, the installer will launch a script called
+ `configure_invokeai.py`, which will guide you through the first-time process
+ of selecting one or more Stable Diffusion model weights files, downloading
+ and configuring them. We provide a list of popular models that InvokeAI
+ performs well with. However, you can add more weight files later on using
+ the command-line client or the Web UI. See
+ [Installing Models](050_INSTALLING_MODELS.md) for details.
+
+ Note that the main Stable Diffusion weights file is protected by a license
+    agreement that you must accept in order to use it. The script will list the
+ steps you need to take to create an account on the official site that hosts
+ the weights files, accept the agreement, and provide an access token that
+ allows InvokeAI to legally download and install the weights files.
+
+ If you have already downloaded the weights file(s) for another Stable
+ Diffusion distribution, you may skip this step (by selecting "skip" when
+ prompted) and configure InvokeAI to use the previously-downloaded files. The
+ process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
+
+11. The script will now exit and you'll be ready to generate some images. Look
+ for the directory `invokeai` installed in the location you chose at the
+ beginning of the install session. Look for a shell script named `invoke.sh`
+ (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
+ it or typing its name at the command-line:
+
+ ```cmd
+ C:\Documents\Linco> cd invokeai
+    C:\Documents\Linco\invokeai> invoke.bat
+ ```
+
+ - The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
+ (1) the command-line interface, or (2) the web GUI. If you start the
+ latter, you can load the user interface by pointing your browser at
+ http://localhost:9090.
+
+ - The script also offers you a third option labeled "open the developer
+ console". If you choose this option, you will be dropped into a
+ command-line interface in which you can run python commands directly,
+ access developer tools, and launch InvokeAI with customized options.
+
+12. You can launch InvokeAI with several different command-line arguments that
+ customize its behavior. For example, you can change the location of the
+ image output directory, or select your favorite sampler. See the
+ [Command-Line Interface](../features/CLI.md) for a full list of the options.
+
+ - To set defaults that will take effect every time you launch InvokeAI,
+      use a text editor (e.g. Notepad) to edit the file
+ `invokeai\invokeai.init`. It contains a variety of examples that you can
+ follow to add and modify launch options.
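+
+      For instance, here is a hypothetical sketch of the kind of launch options
+      you might place there. The flags shown (`--web`, `--outdir`, `--sampler`)
+      correspond to the options mentioned above, but the values and exact set
+      supported are illustrations only; the examples inside your own
+      `invokeai.init` are authoritative:
+
+      ```bash
+      # hypothetical launch options -- check the comments in your own
+      # invokeai.init for the exact options supported by your release
+      --web                  # always start the web GUI
+      --outdir=D:\ai-images  # write generated images to a different location
+      --sampler=k_euler_a    # pick a default sampler
+      ```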
+
+!!! warning "Do not move or remove the `invokeai` directory"
+
+    The `invokeai` directory contains the `invoke` application, its
+    configuration files, the model weight files, and the outputs of image
+    generation. Once InvokeAI is installed, do not move or remove this
+    directory.
+
+## Troubleshooting
+
+### _Package dependency conflicts_
+
+If you have previously installed InvokeAI or another Stable Diffusion package,
+the installer may occasionally pick up outdated libraries and either the
+installer or `invoke` will fail with complaints about library conflicts. You can
+address this by entering the `invokeai` directory and running `update.sh`, which
+will bring InvokeAI up to date with the latest libraries.
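+
+For example (a sketch assuming you accepted the default install location in
+your home directory; Windows users would run `update.bat` instead):
+
+```bash
+cd ~/invokeai   # or wherever you chose to install InvokeAI
+./update.sh     # on Windows: update.bat
+```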
+
+### `ldm` from PyPI
+
+!!! warning
+
+ Some users have tried to correct dependency problems by installing
+    the `ldm` package from PyPI.org. Unfortunately, this is an unrelated package that
+ has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
+ ldm will make matters worse. If you've installed ldm, uninstall it with
+ `pip uninstall ldm`.
+
+### Corrupted configuration file
+
+Everything seems to install OK, but `invoke` complains of a corrupted
+configuration file and sends you back through the configuration process (asking
+you to download models, etc.), and repeating that process doesn't fix the
+problem.
+
+This issue is often caused by a bad directive in the `invokeai\invokeai.init`
+initialization file, which contains your startup settings. The
+easiest way to fix the problem is to move the file out of the way and re-run
+`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
+script) and run this command:
+
+```cmd
+configure_invokeai.py --root=.
+```
+
+Note the dot (.) after `--root`. It is part of the command.
+
+_If none of these maneuvers fixes the problem_ then please report the problem to
+the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
+visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
+assistance.
+
+### Other problems
+
+If you run into problems during or after installation, the InvokeAI team is
+available to help you. Either create an
+[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
+make a request for help on the "bugs-and-support" channel of our
+[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
+organization, but typically somebody will be available to help you within 24
+hours, and often much sooner.
+
+## Updating to newer versions
+
+This distribution is changing rapidly, and we add new features on a daily basis.
+To update to the latest released version (recommended), run the `update.sh`
+(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
+release and re-run the `configure_invokeai` script to download any updated
+models files that may be needed. You can also use this to add additional models
+that you did not select at installation time.
+
+You can now close the developer console and run `invoke` as before. If you get
+complaints about missing models, then you may need to do the additional step of
+running `configure_invokeai.py`. This happens relatively infrequently. To do
+this, simply open up the developer's console again and type
+`python scripts/configure_invokeai.py`.
+
+You may also use the `update` script to install any selected version of
+InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
+link of the version you wish to install. You can find the zip links by going to
+one of the release pages and looking for the **Assets** section at the
+bottom. Alternatively, you can browse "branches" and "tags" at the top of the
+big code directory on the InvokeAI welcome page. When you find the version you
+want to install, go to the green "<> Code" button at the top, and copy the
+"Download ZIP" link.
+
+Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
+version as its argument. For example, this command will install the older 2.2.0 release:
+
+```bash
+update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
+```
+
diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md
new file mode 100644
index 0000000000..94246652a2
--- /dev/null
+++ b/docs/installation/020_INSTALL_MANUAL.md
@@ -0,0 +1,579 @@
+---
+title: Installing Manually
+---
+
+
+
+!!! warning "This is for advanced users"
+
+    who are already experienced with using `conda` or `pip`
+
+## Introduction
+
+You have two choices for manual installation. The [first
+one](#pip-install) uses basic Python virtual environment (`venv`)
+commands and the PIP package manager. The [second one](#conda-method)
+is based on the Anaconda3 package manager (`conda`). Both methods
+require you to enter commands in a terminal, also known as the
+"console".
+
+Note that the conda install method is currently deprecated and will no
+longer be supported at some point in the future.
+
+On Windows systems you are encouraged to install and use the
+[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
+which provides compatibility with Linux and Mac shells and nice
+features such as command-line completion.
+
+## pip Install
+
+To install InvokeAI with virtual environments and the PIP package
+manager, please follow these steps:
+
+1. Make sure you are using Python 3.9 or 3.10. The rest of the install
+ procedure depends on this:
+
+ ```bash
+ python -V
+ ```
+
+2. From within the InvokeAI top-level directory, create and activate a virtual
+ environment named `invokeai`:
+
+ ```bash
+    python -m venv invokeai
+ source invokeai/bin/activate
+ ```
+
+3. Make sure that pip is installed in your virtual environment and is up to date:
+
+ ```bash
+    python -m ensurepip --upgrade
+    python -m pip install --upgrade pip
+ ```
+
+4. Pick the correct `requirements*.txt` file for your hardware and operating
+ system.
+
+ We have created a series of environment files suited for different operating
+ systems and GPU hardware. They are located in the
+ `environments-and-requirements` directory:
+
+
+
+ Select the appropriate requirements file, and make a link to it from
+ `requirements.txt` in the top-level InvokeAI directory. The command to do
+ this from the top-level directory is:
+
+ !!! example ""
+
+ === "Macintosh and Linux"
+
+ !!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
+
+ ```bash
+ ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
+ ```
+
+ === "Windows"
+
+ !!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
+
+ ```cmd
+ copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
+ ```
+
+ !!! warning
+
+ Please do not link or copy `environments-and-requirements/requirements-base.txt`.
+ This is a base requirements file that does not have the platform-specific
+ libraries. Also, be sure to link or copy the platform-specific file to
+ a top-level file named `requirements.txt` as shown here. Running pip on
+ a requirements file in a subdirectory will not work as expected.
+
+ When this is done, confirm that a file named `requirements.txt` has been
+ created in the InvokeAI root directory and that it points to the correct
+ file in `environments-and-requirements`.
+
+5. Run PIP
+
+ Be sure that the `invokeai` environment is active before doing this:
+
+ ```bash
+ pip install --prefer-binary -r requirements.txt
+ ```
+
+6. Set up the runtime directory
+
+ In this step you will initialize a runtime directory that will
+ contain the models, model config files, directory for textual
+ inversion embeddings, and your outputs. This keeps the runtime
+ directory separate from the source code and aids in updating.
+
+ You may pick any location for this directory using the `--root_dir`
+ option (abbreviated --root). If you don't pass this option, it will
+ default to `invokeai` in your home directory.
+
+ ```bash
+ configure_invokeai.py --root_dir ~/Programs/invokeai
+ ```
+
+ The script `configure_invokeai.py` will interactively guide you through the
+ process of downloading and installing the weights files needed for InvokeAI.
+ Note that the main Stable Diffusion weights file is protected by a license
+ agreement that you have to agree to. The script will list the steps you need
+ to take to create an account on the site that hosts the weights files,
+ accept the agreement, and provide an access token that allows InvokeAI to
+ legally download and install the weights files.
+
+ If you get an error message about a module not being installed, check that
+ the `invokeai` environment is active and if not, repeat step 5.
+
+ Note that `configure_invokeai.py` and `invoke.py` should be installed
+ under your virtual environment directory and the system should find them
+ on the PATH. If this isn't working on your system, you can call the
+     scripts directly using `python scripts/configure_invokeai.py` and
+ `python scripts/invoke.py`.
+
+ !!! tip
+
+ If you have already downloaded the weights file(s) for another Stable
+ Diffusion distribution, you may skip this step (by selecting "skip" when
+ prompted) and configure InvokeAI to use the previously-downloaded files. The
+ process for this is described in [here](050_INSTALLING_MODELS.md).
+
+7. Run the command-line or web interface:
+
+ Activate the environment (with `source invokeai/bin/activate`), and then
+ run the script `invoke.py`. If you selected a non-default location
+ for the runtime directory, please specify the path with the `--root_dir`
+ option (abbreviated below as `--root`):
+
+ !!! example ""
+
+ !!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"
+
+ === "CLI"
+
+ ```bash
+ invoke.py --root ~/Programs/invokeai
+ ```
+
+ === "local Webserver"
+
+ ```bash
+ invoke.py --web --root ~/Programs/invokeai
+ ```
+
+ === "Public Webserver"
+
+ ```bash
+ invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
+ ```
+
+    If you choose to run the web interface, point your browser at
+ http://localhost:9090 in order to load the GUI.
+
+ !!! tip
+
+ You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory.
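+
+        For example, a minimal sketch for a bash-compatible shell, assuming you
+        chose `~/Programs/invokeai` as the runtime directory:
+
+        ```bash
+        # add this line to ~/.bashrc or ~/.zshrc so it persists across sessions
+        export INVOKEAI_ROOT=~/Programs/invokeai
+
+        # afterwards you can omit --root when launching
+        invoke.py --web
+        ```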
+
+8. Render away!
+
+ Browse the [features](../features/CLI.md) section to learn about all the things you
+ can do with InvokeAI.
+
+ Note that some GPUs are slow to warm up. In particular, when using an AMD
+ card with the ROCm driver, you may have to wait for over a minute the first
+ time you try to generate an image. Fortunately, after the warm up period
+ rendering will be fast.
+
+9. Subsequently, to relaunch the script, be sure to activate the virtual
+   environment (with `source invokeai/bin/activate`), enter the `InvokeAI`
+   directory, and then launch the `invoke.py` script. If you forget to
+   activate the `invokeai` environment, the script will fail with multiple
+   `ModuleNotFound` errors.
+
+ !!! tip
+
+ Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.
+
+---
+
+### Conda method
+
+1. Check that your system meets the
+ [hardware requirements](index.md#Hardware_Requirements) and has the
+ appropriate GPU drivers installed. In particular, if you are a Linux user
+ with an AMD GPU installed, you may need to install the
+ [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
+
+ InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
+ of ROCm driver support on this platform.
+
+ To confirm that the appropriate drivers are installed, run `nvidia-smi` on
+ NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
+ information about the installed video card.
+
+ Macintosh users with MPS acceleration, or anybody with a CPU-only system,
+ can skip this step.
+
+2. You will need to install Anaconda3 and Git if they are not already
+ available. Use your operating system's preferred package manager, or
+ download the installers manually. You can find them here:
+
+ - [Anaconda3](https://www.anaconda.com/)
+ - [git](https://git-scm.com/downloads)
+
+3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
+ GitHub:
+
+ ```bash
+ git clone https://github.com/invoke-ai/InvokeAI.git
+ ```
+
+   This will create an `InvokeAI` folder in which you will follow the rest of
+   the steps.
+
+4. Enter the newly-created InvokeAI folder:
+
+ ```bash
+ cd InvokeAI
+ ```
+
+ From this step forward make sure that you are working in the InvokeAI
+ directory!
+
+5. Select the appropriate environment file:
+
+ We have created a series of environment files suited for different operating
+ systems and GPU hardware. They are located in the
+ `environments-and-requirements` directory:
+
+
+
+ Choose the appropriate environment file for your system and link or copy it
+   to `environment.yml` in InvokeAI's top-level directory. To do so, run the
+   following command from the repository root:
+
+ !!! Example ""
+
+ === "Macintosh and Linux"
+
+ !!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
+
+ ```bash
+ ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
+ ```
+
+ When this is done, confirm that a file `environment.yml` has been linked in
+ the InvokeAI root directory and that it points to the correct file in the
+ `environments-and-requirements`.
+
+ ```bash
+ ls -la
+ ```
+
+ === "Windows"
+
+ !!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
+
+ ```cmd
+ copy environments-and-requirements\environment-win-cuda.yml environment.yml
+ ```
+
+ Afterwards verify that the file `environment.yml` has been created, either via the
+ explorer or by using the command `dir` from the terminal
+
+ ```cmd
+ dir
+ ```
+
+    !!! warning "Do not try to run conda directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
+
+6. Create the conda environment:
+
+ ```bash
+ conda env update
+ ```
+
+ This will create a new environment named `invokeai` and install all InvokeAI
+ dependencies into it. If something goes wrong you should take a look at
+ [troubleshooting](#troubleshooting).
+
+7. Activate the `invokeai` environment:
+
+ In order to use the newly created environment you will first need to
+ activate it
+
+ ```bash
+ conda activate invokeai
+ ```
+
+ Your command-line prompt should change to indicate that `invokeai` is active
+ by prepending `(invokeai)`.
+
+8. Set up the runtime directory
+
+ In this step you will initialize a runtime directory that will
+ contain the models, model config files, directory for textual
+ inversion embeddings, and your outputs. This keeps the runtime
+ directory separate from the source code and aids in updating.
+
+ You may pick any location for this directory using the `--root_dir`
+ option (abbreviated --root). If you don't pass this option, it will
+ default to `invokeai` in your home directory.
+
+ ```bash
+ python scripts/configure_invokeai.py --root_dir ~/Programs/invokeai
+ ```
+
+ The script `configure_invokeai.py` will interactively guide you through the
+ process of downloading and installing the weights files needed for InvokeAI.
+ Note that the main Stable Diffusion weights file is protected by a license
+ agreement that you have to agree to. The script will list the steps you need
+ to take to create an account on the site that hosts the weights files,
+ accept the agreement, and provide an access token that allows InvokeAI to
+ legally download and install the weights files.
+
+ If you get an error message about a module not being installed, check that
+ the `invokeai` environment is active and if not, repeat step 5.
+
+ Note that `configure_invokeai.py` and `invoke.py` should be
+ installed under your conda directory and the system should find
+ them automatically on the PATH. If this isn't working on your
+    system, you can call the scripts directly using `python
+    scripts/configure_invokeai.py` and `python scripts/invoke.py`.
+
+ !!! tip
+
+ If you have already downloaded the weights file(s) for another Stable
+ Diffusion distribution, you may skip this step (by selecting "skip" when
+ prompted) and configure InvokeAI to use the previously-downloaded files. The
+ process for this is described in [here](050_INSTALLING_MODELS.md).
+
+9. Run the command-line or web interface:
+
+    Activate the environment (with `conda activate invokeai`), and then
+ run the script `invoke.py`. If you selected a non-default location
+ for the runtime directory, please specify the path with the `--root_dir`
+ option (abbreviated below as `--root`):
+
+ !!! example ""
+
+ !!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
+
+ === "CLI"
+
+ ```bash
+ invoke.py --root ~/Programs/invokeai
+ ```
+
+ === "local Webserver"
+
+ ```bash
+ invoke.py --web --root ~/Programs/invokeai
+ ```
+
+ === "Public Webserver"
+
+ ```bash
+ invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
+ ```
+
+    If you choose to run the web interface, point your browser at
+ http://localhost:9090 in order to load the GUI.
+
+ !!! tip
+
+ You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of your choice.
+
+10. Render away!
+
+ Browse the [features](../features/CLI.md) section to learn about all the things you
+ can do with InvokeAI.
+
+ Note that some GPUs are slow to warm up. In particular, when using an AMD
+ card with the ROCm driver, you may have to wait for over a minute the first
+ time you try to generate an image. Fortunately, after the warm up period
+ rendering will be fast.
+
+11. Subsequently, to relaunch the script, be sure to run "conda activate
+ invokeai", enter the `InvokeAI` directory, and then launch the invoke
+ script. If you forget to activate the 'invokeai' environment, the script
+ will fail with multiple `ModuleNotFound` errors.
+
+## Creating an "install" version of InvokeAI
+
+If you wish, you can install InvokeAI and all its dependencies in the
+runtime directory. This allows you to delete the source code
+repository and eliminates the need to provide `--root_dir` at startup
+time. Note that this method only works with the PIP method.
+
+1. Follow the instructions for the PIP install, but in step #2 put the
+ virtual environment into the runtime directory. For example, assuming the
+ runtime directory lives in `~/Programs/invokeai`, you'd run:
+
+ ```bash
+   python -m venv ~/Programs/invokeai
+ ```
+
+2. Now follow steps 3 to 5 in the PIP recipe, ending with the `pip install`
+ step.
+
+3. Run one additional step while you are in the source code repository
+   directory: `pip install .` (note the dot at the end), as sketched below.
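+
+   A sketch of the full sequence, assuming the runtime directory and virtual
+   environment from step 1 live in `~/Programs/invokeai` and the source
+   checkout is named `InvokeAI`:
+
+   ```bash
+   source ~/Programs/invokeai/bin/activate   # the venv created in step 1
+   cd InvokeAI                               # the source code repository
+   pip install .                             # install InvokeAI into that venv
+   ```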
+
+4. That's all! Now, whenever you activate the virtual environment,
+ `invoke.py` will know where to look for the runtime directory without
+ needing a `--root_dir` argument. In addition, you can now move or
+ delete the source code repository entirely.
+
+ (Don't move the runtime directory!)
+
+## Updating to newer versions of the script
+
+This distribution is changing rapidly. If you used the `git clone` method
+(step 3 of the Conda method) to download the InvokeAI directory, then to
+update to the latest and greatest version, launch the Anaconda window, enter
+the `InvokeAI` directory, and type:
+
+```bash
+git pull
+conda env update
+python scripts/configure_invokeai.py --no-interactive #optional
+```
+
+This will bring your local copy into sync with the remote one. The last step may
+be needed to take advantage of new features or released models. The
+`--no-interactive` flag will prevent the script from prompting you to download
+the big Stable Diffusion weights files.
+
+## Troubleshooting
+
+Here are some common issues and their suggested solutions.
+
+### Conda
+
+#### Conda fails before completing `conda env update`
+
+The usual source of these errors is a package incompatibility. While we have
+tried to minimize these, over time packages get updated and sometimes introduce
+incompatibilities.
+
+We suggest that you search
+[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
+channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
+
+You may also try to install the broken packages manually using PIP. To do this,
+activate the `invokeai` environment, and run `pip install` with the name and
+version of the package that is causing the incompatibility. For example:
+
+```bash
+pip install test-tube==0.7.5
+```
+
+You can keep doing this until all requirements are satisfied and the `invoke.py`
+script runs without errors. Please report to
+[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
+to work around the problem so that others can benefit from your investigation.
+
+#### Creating the Conda environment fails on macOS
+
+If creating the conda environment fails with an `lmdb` error, this is most
+likely caused by Clang. Run `brew config` to see which Clang is installed on
+your Mac. If Clang isn't installed, that is what is causing the error. Start by
+installing the additional Xcode command line tools, followed by `brew install llvm`:
+
+```bash
+xcode-select --install
+brew install llvm
+```
+
+If `brew config` shows that Clang is installed, update to the latest llvm and try creating the environment again.
+
+#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
+
+This is usually due to an incomplete or corrupted Conda install. Make sure you
+have linked to the correct environment file and run `conda env update` again.
+
+If the problem persists, a more extreme measure is to clear Conda's caches and
+remove the `invokeai` environment:
+
+```bash
+conda deactivate
+conda env remove -n invokeai
+conda clean -a
+conda env update
+```
+
+This removes all cached library files, including ones that may have been
+corrupted somehow. (This is not supposed to happen, but it does anyway.)
+
+#### `invoke.py` crashes at a later stage
+
+If the CLI or the web interface was working OK, but something unexpected
+happens later on during the session, you've encountered a code bug that is
+probably unrelated to an install issue. Please search
+[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
+ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).
+
+#### My renders are running very slowly
+
+You may have installed the wrong torch (machine learning) package, and the
+system is running on CPU rather than the GPU. To check, look at the log messages
+that appear when `invoke.py` is first starting up. One of the earlier lines
+should say `Using device type cuda`. On AMD systems, it will also say "cuda",
+and on Macintoshes, it should say "mps". If instead the message says it is
+running on "cpu", then you may need to install the correct torch library.
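+
+If the startup messages have already scrolled by, a quick way to check directly
+(a sketch; run it from within the activated `invokeai` environment) is:
+
+```bash
+# prints True if the CUDA (or ROCm) build of torch can see your GPU
+python -c "import torch; print(torch.cuda.is_available())"
+
+# on Macintoshes, check for MPS support instead
+python -c "import torch; print(torch.backends.mps.is_available())"
+```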
+
+You may be able to fix this by installing a different torch library. Here are
+the magic incantations for Conda and PIP.
+
+!!! todo "For CUDA systems"
+
+ - conda
+
+ ```bash
+ conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
+ ```
+
+ - pip
+
+ ```bash
+ pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+ ```
+
+!!! todo "For AMD systems"
+
+ - conda
+
+ ```bash
+ conda activate invokeai
+ pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
+ ```
+
+ - pip
+
+ ```bash
+ pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
+ ```
+
+More information and troubleshooting tips can be found at https://pytorch.org.
diff --git a/docs/installation/INSTALL_DOCKER.md b/docs/installation/040_INSTALL_DOCKER.md
similarity index 90%
rename from docs/installation/INSTALL_DOCKER.md
rename to docs/installation/040_INSTALL_DOCKER.md
index 9f0b203930..c7c2d6adae 100644
--- a/docs/installation/INSTALL_DOCKER.md
+++ b/docs/installation/040_INSTALL_DOCKER.md
@@ -1,5 +1,5 @@
---
-title: Docker
+title: Installing with Docker
---
# :fontawesome-brands-docker: Docker
@@ -78,15 +78,16 @@ Some Suggestions of variables you may want to change besides the Token:
@@ -129,7 +130,7 @@ also do so.
## Running the container on your GPU
-If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
+If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
environment variable to enable GPU usage and have the process run much faster:
```bash
diff --git a/docs/installation/INSTALLING_MODELS.md b/docs/installation/050_INSTALLING_MODELS.md
similarity index 100%
rename from docs/installation/INSTALLING_MODELS.md
rename to docs/installation/050_INSTALLING_MODELS.md
diff --git a/docs/installation/INSTALL_PATCHMATCH.md b/docs/installation/060_INSTALL_PATCHMATCH.md
similarity index 100%
rename from docs/installation/INSTALL_PATCHMATCH.md
rename to docs/installation/060_INSTALL_PATCHMATCH.md
diff --git a/docs/installation/BUILDING_BINARY_INSTALLERS.md b/docs/installation/Developers_documentation/BUILDING_BINARY_INSTALLERS.md
similarity index 100%
rename from docs/installation/BUILDING_BINARY_INSTALLERS.md
rename to docs/installation/Developers_documentation/BUILDING_BINARY_INSTALLERS.md
diff --git a/docs/installation/INSTALL_AUTOMATED.md b/docs/installation/INSTALL_AUTOMATED.md
deleted file mode 100644
index 9b33e1e5fd..0000000000
--- a/docs/installation/INSTALL_AUTOMATED.md
+++ /dev/null
@@ -1,315 +0,0 @@
----
-title: InvokeAI Automated Installation
----
-
-# InvokeAI Automated Installation
-
-## Introduction
-
-The automated installer is a shell script that attempts to automate every step
-needed to install and run InvokeAI on a stock computer running recent versions
-of Linux, MacOS or Windows. It will leave you with a version that runs a stable
-version of InvokeAI with the option to upgrade to experimental versions later.
-
-## Walk through
-
-1. Make sure that your system meets the
- [hardware requirements](../index.md#hardware-requirements) and has the
- appropriate GPU drivers installed. In particular, if you are a Linux user
- with an AMD GPU installed, you may need to install the
- [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
-
- !!! info "Required Space"
-
- Installation requires roughly 18G of free disk space to load the libraries and
- recommended model weights files.
-
-2. Check that your system has an up-to-date Python installed. To do this, open
- up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
- "Powershell" on Windows) and type `python --version`. If Python is
- installed, it will print out the version number. If it is version `3.9.1` or
- higher, you meet requirements.
-
- !!! warning "If you see an older version, or get a command not found error"
-
- Go to [Python Downloads](https://www.python.org/downloads/) and
- download the appropriate installer package for your platform. We recommend
- [Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
- which has been extensively tested with InvokeAI.
-
- !!! warning "At this time we do not recommend Python 3.11"
-
- === "Windows users"
-
- - During the Python configuration process,
- Please look out for a checkbox to add Python to your PATH
- and select it. If the install script complains that it can't
- find python, then open the Python installer again and choose
- "Modify" existing installation.
-
- - There is a slight possibility that you will encountered
- DLL load errors at the very end of the installation process. This is caused
- by not having up to date Visual C++ redistributable libraries. If this
- happens to you, you can install the C++ libraries from this site:
- https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
-
- === "Mac users"
-
- - After installing Python, you may need to run the
- following command from the Terminal in order to install the Web
- certificates needed to download model data from https sites. If
- you see lots of CERTIFICATE ERRORS during the last part of the
- install, this is the problem, and you can fix it with this command:
-
- `/Applications/Python\ 3.10/Install\ Certificates.command`
-
- - You may need to install the Xcode command line tools. These
- are a set of tools that are needed to run certain applications in a
- Terminal, including InvokeAI. This package is provided directly by Apple.
-
- - To install, open a terminal window and run `xcode-select
- --install`. You will get a macOS system popup guiding you through the
- install. If you already have them installed, you will instead see some
- output in the Terminal advising you that the tools are already installed.
-
- - More information can be found here:
- https://www.freecodecamp.org/news/install-xcode-command-line-tools/
-
- === "Linux users"
-
- - See [Installing Python in Ubuntu](#installing-python-in-ubuntu) for some
- platform-specific tips.
-
-3. The source installer is distributed in ZIP files. Go to the
- [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
- look for a series of files named:
-
- - [InvokeAI-installer-2.2.4-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-mac.zip)
- - [InvokeAI-installer-2.2.4-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-windows.zip)
- - [InvokeAI-installer-2.2.4-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-linux.zip)
-
- Download the one that is appropriate for your operating system.
-
-4. Unpack the zip file into a convenient directory. This will create a new
- directory named "InvokeAI-Installer". This example shows how this would look
- using the `unzip` command-line tool, but you may use any graphical or
- command-line Zip extractor:
-
- ```cmd
- C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
- Archive: C: \Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
- creating: InvokeAI-Installer\
- inflating: InvokeAI-Installer\install.bat
- inflating: InvokeAI-Installer\readme.txt
- ...
- ```
-
- After successful installation, you can delete the `InvokeAI-Installer`
- directory.
-
-5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
- accept the dialog box that asks you if you wish to modify your registry.
- This activates long filename support on your system and will prevent
- mysterious errors during installation.
-
-6. If you are using a desktop GUI, double-click the installer file. It will be
- named `install.bat` on Windows systems and `install.sh` on Linux and
- Macintosh systems.
-
- On Windows systems you will probably get an "Untrusted Publisher" warning.
- Click on "More Info" and select "Run Anyway." You trust us, right?
-
-7. Alternatively, from the command line, run the shell script or .bat file:
-
- ```cmd
- C:\Documents\Linco> cd InvokeAI-Installer
- C:\Documents\Linco\invokeAI> install.bat
- ```
-
-8. The script will ask you to choose where to install InvokeAI. Select a
- directory with at least 18G of free space for a full install. InvokeAI and
- all its support files will be installed into a new directory named
- `invokeai` located at the location you specify.
-
- - The default is to install the `invokeai` directory in your home directory,
- usually `C:\Users\YourName\invokeai` on Windows systems,
- `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
- on Macintoshes, where "YourName" is your login name.
-
- - The script uses tab autocompletion to suggest directory path completions.
- Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
- to suggest completions.
-
-9. Sit back and let the install script work. It will install the third-party
- libraries needed by InvokeAI, then download the current InvokeAI release and
- install it.
-
- Be aware that some of the library download and install steps take a long
- time. In particular, the `pytorch` package is quite large and often appears
- to get "stuck" at 99.9%. Have patience and the installation step will
- eventually resume. However, there are occasions when the library install
- does legitimately get stuck. If you have been waiting for more than ten
- minutes and nothing is happening, you can interrupt the script with ^C. You
- may restart it and it will pick up where it left off.
-
-10. After installation completes, the installer will launch a script called
- `configure_invokeai.py`, which will guide you through the first-time process
- of selecting one or more Stable Diffusion model weights files, downloading
- and configuring them. We provide a list of popular models that InvokeAI
- performs well with. However, you can add more weight files later on using
- the command-line client or the Web UI. See
- [Installing Models](INSTALLING_MODELS.md) for details.
-
- Note that the main Stable Diffusion weights file is protected by a license
- agreement that you must agree to in order to use. The script will list the
- steps you need to take to create an account on the official site that hosts
- the weights files, accept the agreement, and provide an access token that
- allows InvokeAI to legally download and install the weights files.
-
- If you have already downloaded the weights file(s) for another Stable
- Diffusion distribution, you may skip this step (by selecting "skip" when
- prompted) and configure InvokeAI to use the previously-downloaded files. The
- process for this is described in [Installing Models](INSTALLING_MODELS.md).
-
-11. The script will now exit and you'll be ready to generate some images. Look
- for the directory `invokeai` installed in the location you chose at the
- beginning of the install session. Look for a shell script named `invoke.sh`
- (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
- it or typing its name at the command-line:
-
- ```cmd
- C:\Documents\Linco> cd invokeai
- C:\Documents\Linco\invokeAI> invoke.bat
- ```
-
- - The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
- (1) the command-line interface, or (2) the web GUI. If you start the
- latter, you can load the user interface by pointing your browser at
- http://localhost:9090.
-
- - The script also offers you a third option labeled "open the developer
- console". If you choose this option, you will be dropped into a
- command-line interface in which you can run python commands directly,
- access developer tools, and launch InvokeAI with customized options.
-
-12. You can launch InvokeAI with several different command-line arguments that
- customize its behavior. For example, you can change the location of the
- image output directory, or select your favorite sampler. See the
- [Command-Line Interface](../features/CLI.md) for a full list of the options.
-
- - To set defaults that will take effect every time you launch InvokeAI,
- use a text editor (e.g. Notepad) to exit the file
- `invokeai\invokeai.init`. It contains a variety of examples that you can
- follow to add and modify launch options.
-
-!!! warning "The `invokeai` directory contains the `invoke` application, its
-configuration files, the model weight files, and outputs of image generation.
-Once InvokeAI is installed, do not move or remove this directory."
-
-## Troubleshooting
-
-### _Package dependency conflicts_
-
-If you have previously installed InvokeAI or another Stable Diffusion package,
-the installer may occasionally pick up outdated libraries and either the
-installer or `invoke` will fail with complaints about library conflicts. You can
-address this by entering the `invokeai` directory and running `update.sh`, which
-will bring InvokeAI up to date with the latest libraries.
-
-### ldm from pypi
-
-!!! warning
-
- Some users have tried to correct dependency problems by installing
- the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
- has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
- ldm will make matters worse. If you've installed ldm, uninstall it with
- `pip uninstall ldm`.
-
-### Corrupted configuration file
-
-Everything seems to install ok, but `invoke` complains of a corrupted
-configuration file and goes back into the configuration process (asking you to
-download models, etc), but this doesn't fix the problem.
-
-This issue is often caused by a misconfigured configuration directive in the
-`invokeai\invokeai.init` initialization file that contains startup settings. The
-easiest way to fix the problem is to move the file out of the way and re-run
-`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
-script) and run this command:
-
-```cmd
-configure_invokeai.py --root=.
-```
-
-Note the dot (.) after `--root`. It is part of the command.
-
-_If none of these maneuvers fixes the problem_ then please report the problem to
-the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
-visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
-assistance.
-
-### other problems
-
-If you run into problems during or after installation, the InvokeAI team is
-available to help you. Either create an
-[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
-make a request for help on the "bugs-and-support" channel of our
-[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
-organization, but typically somebody will be available to help you within 24
-hours, and often much sooner.
-
-## Updating to newer versions
-
-This distribution is changing rapidly, and we add new features on a daily basis.
-To update to the latest released version (recommended), run the `update.sh`
-(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
-release and re-run the `configure_invokeai` script to download any updated
-models files that may be needed. You can also use this to add additional models
-that you did not select at installation time.
-
-You can now close the developer console and run `invoke` as before. If you get
-complaints about missing models, then you may need to do the additional step of
-running `configure_invokeai.py`. This happens relatively infrequently. To do
-this, simply open up the developer's console again and type
-`python scripts/configure_invokeai.py`.
-
-You may also use the `update` script to install any selected version of
-InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
-link of the version you wish to install. You can find the zip links by going to
-the one of the release pages and looking for the **Assets** section at the
-bottom. Alternatively, you can browse "branches" and "tags" at the top of the
-big code directory on the InvokeAI welcome page. When you find the version you
-want to install, go to the green "<> Code" button at the top, and copy the
-"Download ZIP" link.
-
-Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
-version as its argument. For example, this will install the old 2.2.0 release.
-
-```cmd
-update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
-```
-
-## Installing Python in Ubuntu
-
-For reasons that are not entirely clear, installing the correct version of
-Python can be a bit of a challenge on Ubuntu, Linux Mint, and other
-Ubuntu-derived distributions.
-
-In particular, Ubuntu version 20.04 LTS comes with an old version of Python,
-does not come with the PIP package manager installed, and to make matters worse,
-the `python` command points to Python2, not Python3.
-
-Here is the quick recipe for bringing your system up to date:
-
-```
-sudo apt update
-sudo apt install python3.9
-sudo apt install python3-pip
-cd /usr/bin
-sudo ln -sf python3.9 python3
-sudo ln -sf python3 python
-```
-
-You can still access older versions of Python by calling `python2`, `python3.8`,
-etc.
diff --git a/docs/installation/INSTALL_AUTOMATED.md b/docs/installation/INSTALL_AUTOMATED.md
new file mode 120000
index 0000000000..1818736494
--- /dev/null
+++ b/docs/installation/INSTALL_AUTOMATED.md
@@ -0,0 +1 @@
+010_INSTALL_AUTOMATED.md
\ No newline at end of file
diff --git a/docs/installation/INSTALL_MANUAL.md b/docs/installation/INSTALL_MANUAL.md
deleted file mode 100644
index d94c441776..0000000000
--- a/docs/installation/INSTALL_MANUAL.md
+++ /dev/null
@@ -1,429 +0,0 @@
----
-title: Manual Installation
----
-
-
-
-!!! warning "This is for advanced Users"
-
- who are already experienced with using conda or pip
-
-## Introduction
-
-You have two choices for manual installation, the [first one](#Conda_method)
-based on the Anaconda3 package manager (`conda`), and
-[a second one](#PIP_method) which uses basic Python virtual environment (`venv`)
-commands and the PIP package manager. Both methods require you to enter commands
-on the terminal, also known as the "console".
-
-On Windows systems you are encouraged to install and use the
-[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
-which provides compatibility with Linux and Mac shells and nice features such as
-command-line completion.
-
-### Conda method
-
-1. Check that your system meets the
- [hardware requirements](index.md#Hardware_Requirements) and has the
- appropriate GPU drivers installed. In particular, if you are a Linux user
- with an AMD GPU installed, you may need to install the
- [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
-
- InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
- of ROCm driver support on this platform.
-
- To confirm that the appropriate drivers are installed, run `nvidia-smi` on
- NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
- information about the installed video card.
-
- Macintosh users with MPS acceleration, or anybody with a CPU-only system,
- can skip this step.
-
-2. You will need to install Anaconda3 and Git if they are not already
- available. Use your operating system's preferred package manager, or
- download the installers manually. You can find them here:
-
- - [Anaconda3](https://www.anaconda.com/)
- - [git](https://git-scm.com/downloads)
-
-3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
- GitHub:
-
- ```bash
- git clone https://github.com/invoke-ai/InvokeAI.git
- ```
-
- This will create InvokeAI folder where you will follow the rest of the
- steps.
-
-4. Enter the newly-created InvokeAI folder:
-
- ```bash
- cd InvokeAI
- ```
-
- From this step forward make sure that you are working in the InvokeAI
- directory!
-
-5. Select the appropriate environment file:
-
- We have created a series of environment files suited for different operating
- systems and GPU hardware. They are located in the
- `environments-and-requirements` directory:
-
-
-
- Choose the appropriate environment file for your system and link or copy it
- to `environment.yml` in InvokeAI's top-level directory. To do so, run
- following command from the repository-root:
-
- !!! Example ""
-
- === "Macintosh and Linux"
-
- !!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
-
- ```bash
- ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
- ```
-
- When this is done, confirm that a file `environment.yml` has been linked in
- the InvokeAI root directory and that it points to the correct file in the
- `environments-and-requirements`.
-
- ```bash
- ls -la
- ```
-
- === "Windows"
-
- !!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
-
- ```cmd
- copy environments-and-requirements\environment-win-cuda.yml environment.yml
- ```
-
- Afterwards verify that the file `environment.yml` has been created, either via the
- explorer or by using the command `dir` from the terminal
-
- ```cmd
- dir
- ```
-
- !!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
-
-6. Create the conda environment:
-
- ```bash
- conda env update
- ```
-
- This will create a new environment named `invokeai` and install all InvokeAI
- dependencies into it. If something goes wrong you should take a look at
- [troubleshooting](#troubleshooting).
-
-7. Activate the `invokeai` environment:
-
- In order to use the newly created environment you will first need to
- activate it
-
- ```bash
- conda activate invokeai
- ```
-
- Your command-line prompt should change to indicate that `invokeai` is active
- by prepending `(invokeai)`.
-
-8. Pre-Load the model weights files:
-
- !!! tip
-
- If you have already downloaded the weights file(s) for another Stable
- Diffusion distribution, you may skip this step (by selecting "skip" when
- prompted) and configure InvokeAI to use the previously-downloaded files. The
- process for this is described in [here](INSTALLING_MODELS.md).
-
- ```bash
- python scripts/configure_invokeai.py
- ```
-
- The script `configure_invokeai.py` will interactively guide you through the
- process of downloading and installing the weights files needed for InvokeAI.
- Note that the main Stable Diffusion weights file is protected by a license
- agreement that you have to agree to. The script will list the steps you need
- to take to create an account on the site that hosts the weights files,
- accept the agreement, and provide an access token that allows InvokeAI to
- legally download and install the weights files.
-
- If you get an error message about a module not being installed, check that
- the `invokeai` environment is active and if not, repeat step 5.
-
-9. Run the command-line- or the web- interface:
-
- !!! example ""
-
- !!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
-
- === "CLI"
-
- ```bash
- python scripts/invoke.py
- ```
-
- === "local Webserver"
-
- ```bash
- python scripts/invoke.py --web
- ```
-
- === "Public Webserver"
-
- ```bash
- python scripts/invoke.py --web --host 0.0.0.0
- ```
-
- If you choose the run the web interface, point your browser at
- http://localhost:9090 in order to load the GUI.
-
-10. Render away!
-
- Browse the [features](../features/CLI.md) section to learn about all the things you
- can do with InvokeAI.
-
- Note that some GPUs are slow to warm up. In particular, when using an AMD
- card with the ROCm driver, you may have to wait for over a minute the first
- time you try to generate an image. Fortunately, after the warm up period
- rendering will be fast.
-
-11. Subsequently, to relaunch the script, be sure to run "conda activate
- invokeai", enter the `InvokeAI` directory, and then launch the invoke
- script. If you forget to activate the 'invokeai' environment, the script
- will fail with multiple `ModuleNotFound` errors.
-
-## Updating to newer versions of the script
-
-This distribution is changing rapidly. If you used the `git clone` method
-(step 5) to download the InvokeAI directory, then to update to the latest and
-greatest version, launch the Anaconda window, enter `InvokeAI` and type:
-
-```bash
-git pull
-conda env update
-python scripts/configure_invokeai.py --no-interactive #optional
-```
-
-This will bring your local copy into sync with the remote one. The last step may
-be needed to take advantage of new features or released models. The
-`--no-interactive` flag will prevent the script from prompting you to download
-the big Stable Diffusion weights files.
-
-## pip Install
-
-To install InvokeAI with only the PIP package manager, please follow these
-steps:
-
-1. Make sure you are using Python 3.9 or higher. The rest of the install
- procedure depends on this:
-
- ```bash
- python -V
- ```
-
-2. Install the `virtualenv` tool if you don't have it already:
-
- ```bash
- pip install virtualenv
- ```
-
-3. From within the InvokeAI top-level directory, create and activate a virtual
- environment named `invokeai`:
-
- ```bash
- virtualenv invokeai
- source invokeai/bin/activate
- ```
-
-4. Pick the correct `requirements*.txt` file for your hardware and operating
- system.
-
- We have created a series of environment files suited for different operating
- systems and GPU hardware. They are located in the
- `environments-and-requirements` directory:
-
-
-
- Select the appropriate requirements file, and make a link to it from
- `requirements.txt` in the top-level InvokeAI directory. The command to do
- this from the top-level directory is:
-
- !!! example ""
-
- === "Macintosh and Linux"
-
- !!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
-
- ```bash
- ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
- ```
-
- === "Windows"
-
- !!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
-
- ```cmd
- copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
- ```
-
- !!! warning
-
- Please do not link or copy `environments-and-requirements/requirements-base.txt`.
- This is a base requirements file that does not have the platform-specific
- libraries. Also, be sure to link or copy the platform-specific file to
- a top-level file named `requirements.txt` as shown here. Running pip on
- a requirements file in a subdirectory will not work as expected.
-
- When this is done, confirm that a file named `requirements.txt` has been
- created in the InvokeAI root directory and that it points to the correct
- file in `environments-and-requirements`.
-
-5. Run PIP
-
- Be sure that the `invokeai` environment is active before doing this:
-
- ```bash
- pip install --prefer-binary -r requirements.txt
- ```
-
----
-
-## Troubleshooting
-
-Here are some common issues and their suggested solutions.
-
-### Conda
-
-#### Conda fails before completing `conda update`
-
-The usual source of these errors is a package incompatibility. While we have
-tried to minimize these, over time packages get updated and sometimes introduce
-incompatibilities.
-
-We suggest that you search
-[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
-channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
-
-You may also try to install the broken packages manually using PIP. To do this,
-activate the `invokeai` environment, and run `pip install` with the name and
-version of the package that is causing the incompatibility. For example:
-
-```bash
-pip install test-tube==0.7.5
-```
-
-You can keep doing this until all requirements are satisfied and the `invoke.py`
-script runs without errors. Please report to
-[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
-to work around the problem so that others can benefit from your investigation.
-
-### Create Conda Environment fails on MacOS
-
-If conda create environment fails with lmdb error, this is most likely caused by Clang.
-Run brew config to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error.
-Start by installing additional XCode command line tools, followed by brew install llvm.
-
-```bash
-xcode-select --install
-brew install llvm
-```
-
-If brew config has Clang installed, update to the latest llvm and try creating the environment again.
-
-#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
-
-This is usually due to an incomplete or corrupted Conda install. Make sure you
-have linked to the correct environment file and run `conda update` again.
-
-If the problem persists, a more extreme measure is to clear Conda's caches and
-remove the `invokeai` environment:
-
-```bash
-conda deactivate
-conda env remove -n invokeai
-conda clean -a
-conda update
-```
-
-This removes all cached library files, including ones that may have been
-corrupted somehow. (This is not supposed to happen, but occasionally does anyway.)
-
-#### `invoke.py` crashes at a later stage
-
-If the CLI or the web interface has been working fine but something unexpected
-happens later in the session, you have encountered a code bug that is probably
-unrelated to an install issue. Please search
-[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
-ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).
-
-#### My renders are running very slowly
-
-You may have installed the wrong torch (machine learning) package, and the
-system is running on the CPU rather than on the GPU. To check, look at the log
-messages that appear when `invoke.py` is first starting up. One of the earlier
-lines should say `Using device type cuda`. On AMD systems it will also say
-`cuda`, and on Macintoshes it should say `mps`. If instead the message says it
-is running on `cpu`, then you may need to install the correct torch library.
-
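-If the startup log is not conclusive, you can also query torch directly from
-the activated environment (a sketch; the MPS check requires torch 1.12 or newer):
-
-```bash
-python -c "import torch; print(torch.cuda.is_available())"           # True on working CUDA and ROCm installs
-python -c "import torch; print(torch.backends.mps.is_available())"   # True on Apple Silicon Macs with MPS support
-```
-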
-You may be able to fix this by installing a different torch library. Here are
-the magic incantations for Conda and PIP.
-
-!!! todo "For CUDA systems"
-
- - conda
-
- ```bash
- conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
- ```
-
- - pip
-
- ```bash
- pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
- ```
-
-!!! todo "For AMD systems"
-
- - conda
-
- ```bash
- conda activate invokeai
- pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
- ```
-
- - pip
-
- ```bash
- pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
- ```
-
-More information and troubleshooting tips can be found at https://pytorch.org.
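-
-After reinstalling torch, you can confirm which build was picked up; wheels
-installed from the CUDA or ROCm index normally carry a local version suffix
-(a sketch; exact version numbers will differ on your system):
-
-```bash
-python -c "import torch; print(torch.__version__)"   # typically ends in '+cu116' or '+rocm5.2' for pip wheels; conda builds omit the suffix
-```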
diff --git a/docs/installation/INSTALL_MANUAL.md b/docs/installation/INSTALL_MANUAL.md
new file mode 120000
index 0000000000..8f033ebba2
--- /dev/null
+++ b/docs/installation/INSTALL_MANUAL.md
@@ -0,0 +1 @@
+020_INSTALL_MANUAL.md
\ No newline at end of file
diff --git a/docs/installation/INSTALL_INVOKE.md b/docs/installation/deprecated_documentation/INSTALL_BINARY.md
similarity index 98%
rename from docs/installation/INSTALL_INVOKE.md
rename to docs/installation/deprecated_documentation/INSTALL_BINARY.md
index 30d52daa18..bc12f3d848 100644
--- a/docs/installation/INSTALL_INVOKE.md
+++ b/docs/installation/deprecated_documentation/INSTALL_BINARY.md
@@ -10,7 +10,7 @@ InvokeAI is released, you will download and reinstall the new version.
If you wish to tinker with unreleased versions of InvokeAI that introduce
potentially unstable new features, you should consider using the
[source installer](INSTALL_SOURCE.md) or one of the
-[manual install](INSTALL_MANUAL.md) methods.
+[manual install](../020_INSTALL_MANUAL.md) methods.
**Important Caveats**
- This script does not support AMD GPUs. For Linux AMD support,
diff --git a/docs/installation/INSTALL_JUPYTER.md b/docs/installation/deprecated_documentation/INSTALL_JUPYTER.md
similarity index 100%
rename from docs/installation/INSTALL_JUPYTER.md
rename to docs/installation/deprecated_documentation/INSTALL_JUPYTER.md
diff --git a/docs/installation/older_docs_to_be_removed/INSTALL_LINUX.md b/docs/installation/deprecated_documentation/INSTALL_LINUX.md
similarity index 100%
rename from docs/installation/older_docs_to_be_removed/INSTALL_LINUX.md
rename to docs/installation/deprecated_documentation/INSTALL_LINUX.md
diff --git a/docs/installation/older_docs_to_be_removed/INSTALL_MAC.md b/docs/installation/deprecated_documentation/INSTALL_MAC.md
similarity index 100%
rename from docs/installation/older_docs_to_be_removed/INSTALL_MAC.md
rename to docs/installation/deprecated_documentation/INSTALL_MAC.md
diff --git a/docs/installation/INSTALL_PCP.md b/docs/installation/deprecated_documentation/INSTALL_PCP.md
similarity index 100%
rename from docs/installation/INSTALL_PCP.md
rename to docs/installation/deprecated_documentation/INSTALL_PCP.md
diff --git a/docs/installation/INSTALL_SOURCE.md b/docs/installation/deprecated_documentation/INSTALL_SOURCE.md
similarity index 97%
rename from docs/installation/INSTALL_SOURCE.md
rename to docs/installation/deprecated_documentation/INSTALL_SOURCE.md
index 3629a8c8be..2b1b750fbf 100644
--- a/docs/installation/INSTALL_SOURCE.md
+++ b/docs/installation/deprecated_documentation/INSTALL_SOURCE.md
@@ -12,7 +12,7 @@ of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
Before you begin, make sure that you meet the
-[hardware requirements](index.md#Hardware_Requirements) and has the appropriate
+[hardware requirements](../../index.md#hardware-requirements) and have the appropriate
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
@@ -50,15 +50,15 @@ off the process.
inflating: invokeAI\readme.txt
```
-3. If you are a macOS user, you may need to install the Xcode command line tools.
- These are a set of tools that are needed to run certain applications in a Terminal,
+3. If you are a macOS user, you may need to install the Xcode command line tools.
+ These are a set of tools that are needed to run certain applications in a Terminal,
including InvokeAI. This package is provided directly by Apple.
-
+
To install, open a terminal window and run `xcode-select --install`. You will get
a macOS system popup guiding you through the install. If you already have them
- installed, you will instead see some output in the Terminal advising you that the
+ installed, you will instead see some output in the Terminal advising you that the
tools are already installed.
-
+
More information can be found here:
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
@@ -100,7 +100,7 @@ off the process.
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
- process for this is described in [Installing Models](INSTALLING_MODELS.md).
+ process for this is described in [Installing Models](../050_INSTALLING_MODELS.md).
8. The script will now exit and you'll be ready to generate some images. The
invokeAI directory will contain numerous files. Look for a shell script
@@ -128,7 +128,7 @@ python scripts/invoke.py --web --max_load_models=3 \
```
These options are described in detail in the
-[Command-Line Interface](../features/CLI.md) documentation.
+[Command-Line Interface](../../features/CLI.md) documentation.
## Troubleshooting
diff --git a/docs/installation/older_docs_to_be_removed/INSTALL_WINDOWS.md b/docs/installation/deprecated_documentation/INSTALL_WINDOWS.md
similarity index 100%
rename from docs/installation/older_docs_to_be_removed/INSTALL_WINDOWS.md
rename to docs/installation/deprecated_documentation/INSTALL_WINDOWS.md
diff --git a/docs/installation/index.md b/docs/installation/index.md
index 85690f29da..ef50cbab5f 100644
--- a/docs/installation/index.md
+++ b/docs/installation/index.md
@@ -5,14 +5,14 @@ title: Overview
We offer several ways to install InvokeAI, each one suited to your
experience and preferences.
-1. [Automated Installer](INSTALL_AUTOMATED.md)
+1. [Automated Installer](010_INSTALL_AUTOMATED.md)
This is a script that will install all of InvokeAI's essential
third party libraries and InvokeAI itself. It includes access to a
"developer console" which will help us debug problems with you and
give you access to experimental features.
-2. [Manual Installation](INSTALL_MANUAL.md)
+2. [Manual Installation](020_INSTALL_MANUAL.md)
In this method you will manually run the commands needed to install
InvokeAI and its dependencies. We offer two recipes: one suited to
@@ -25,10 +25,9 @@ experience and preferences.
the cutting edge of future InvokeAI development and is willing to put
up with occasional glitches and breakage.
-3. [Docker Installation](INSTALL_DOCKER.md)
+3. [Docker Installation](040_INSTALL_DOCKER.md)
We also offer a method for creating Docker containers containing
InvokeAI and its dependencies. This method is recommended for
individuals who have experience with Docker containers and understand
the pluses and minuses of a container-based install.
-
diff --git a/environments-and-requirements/environment-lin-aarch64.yml b/environments-and-requirements/environment-lin-aarch64.yml
index ae798d777c..01fc4c30c2 100644
--- a/environments-and-requirements/environment-lin-aarch64.yml
+++ b/environments-and-requirements/environment-lin-aarch64.yml
@@ -30,7 +30,6 @@ dependencies:
- torchvision
- transformers~=4.25
- pip:
- - dependency_injector==4.40.0
- getpass_asterisk
- omegaconf==2.1.1
- picklescan
@@ -42,5 +41,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+ - git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
diff --git a/environments-and-requirements/environment-lin-amd.yml b/environments-and-requirements/environment-lin-amd.yml
index 2eb4b1b148..1ad046ea84 100644
--- a/environments-and-requirements/environment-lin-amd.yml
+++ b/environments-and-requirements/environment-lin-amd.yml
@@ -10,7 +10,6 @@ dependencies:
- pip:
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
- albumentations==0.4.3
- - dependency_injector==4.40.0
- diffusers~=0.10
- einops==0.3.0
- eventlet
@@ -44,5 +43,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+ - git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
diff --git a/environments-and-requirements/environment-lin-cuda.yml b/environments-and-requirements/environment-lin-cuda.yml
index 0d51656cb0..8bb073d832 100644
--- a/environments-and-requirements/environment-lin-cuda.yml
+++ b/environments-and-requirements/environment-lin-cuda.yml
@@ -14,7 +14,6 @@ dependencies:
- pip:
- accelerate~=0.13
- albumentations==0.4.3
- - dependency_injector==4.40.0
- diffusers~=0.10
- einops==0.3.0
- eventlet
@@ -44,5 +43,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+ - git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
diff --git a/environments-and-requirements/environment-mac.yml b/environments-and-requirements/environment-mac.yml
index f6d9816843..18de1ce6c4 100644
--- a/environments-and-requirements/environment-mac.yml
+++ b/environments-and-requirements/environment-mac.yml
@@ -59,7 +59,7 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
- - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+ - git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
variables:
PYTORCH_ENABLE_MPS_FALLBACK: 1
diff --git a/environments-and-requirements/environment-win-cuda.yml b/environments-and-requirements/environment-win-cuda.yml
index 9c041500a1..dd1404e1d0 100644
--- a/environments-and-requirements/environment-win-cuda.yml
+++ b/environments-and-requirements/environment-win-cuda.yml
@@ -13,7 +13,6 @@ dependencies:
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- - dependency_injector==4.40.0
- diffusers~=0.10
- einops==0.3.0
- eventlet
@@ -43,5 +42,5 @@ dependencies:
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
- - git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+ - git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
- -e .
diff --git a/environments-and-requirements/requirements-base.txt b/environments-and-requirements/requirements-base.txt
index 9d75a286d8..a7b9d48c13 100644
--- a/environments-and-requirements/requirements-base.txt
+++ b/environments-and-requirements/requirements-base.txt
@@ -1,6 +1,5 @@
# pip will resolve the version which matches torch
albumentations
-dependency_injector==4.40.0
diffusers[torch]~=0.10
einops
eventlet
@@ -35,6 +34,6 @@ torch-fidelity
torchmetrics
transformers~=4.25
https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
-https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.4.zip#egg=pypatchmatch
+https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch
https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg
diff --git a/frontend/.eslintrc.cjs b/frontend/.eslintrc.cjs
index ba77c4481e..11da47aa86 100644
--- a/frontend/.eslintrc.cjs
+++ b/frontend/.eslintrc.cjs
@@ -1,6 +1,13 @@
module.exports = {
- extends: ['eslint:recommended', 'plugin:@typescript-eslint/recommended', 'plugin:react-hooks/recommended'],
+ extends: [
+ 'eslint:recommended',
+ 'plugin:@typescript-eslint/recommended',
+ 'plugin:react-hooks/recommended',
+ ],
parser: '@typescript-eslint/parser',
plugins: ['@typescript-eslint', 'eslint-plugin-react-hooks'],
root: true,
+ rules: {
+ '@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
+ },
};
diff --git a/frontend/index.html b/frontend/index.html
index b8776b3bfb..8314e22a7d 100644
--- a/frontend/index.html
+++ b/frontend/index.html
@@ -1,16 +1,14 @@
[index.html markup not recoverable from this hunk; the page title "InvokeAI - A Stable Diffusion Toolkit" appears in both the old and new versions]