From cdb3e18b807e94fdbdf0f93a3f6bdfa0842e2c07 Mon Sep 17 00:00:00 2001 From: mauwii Date: Mon, 6 Feb 2023 23:46:53 +0100 Subject: [PATCH 01/52] add flavor to pip cache id to prevent cache invalidation --- .github/workflows/build-container.yml | 3 ++- docker/Dockerfile | 33 ++++++++++++++------------- docker/build.sh | 7 +++--- docker/run.sh | 2 +- 4 files changed, 24 insertions(+), 21 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index b340768dd2..2ba9e588e2 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -3,7 +3,8 @@ on: push: branches: - 'main' - - 'update/ci/*' + - 'update/ci/docker/*' + - 'update/docker/*' tags: - 'v*.*.*' diff --git a/docker/Dockerfile b/docker/Dockerfile index d957e72334..7146a67981 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,5 @@ # syntax=docker/dockerfile:1 + ARG PYTHON_VERSION=3.9 ################## ## base image ## @@ -6,19 +7,19 @@ ARG PYTHON_VERSION=3.9 FROM python:${PYTHON_VERSION}-slim AS python-base # prepare for buildkit cache -RUN rm -f /etc/apt/apt.conf.d/docker-clean +RUN rm -f /etc/apt/apt.conf.d/docker-clean \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache # Install necesarry packages RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ - apt-get update \ - && apt-get install \ - -yqq \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update \ + && apt-get install -y \ --no-install-recommends \ libgl1-mesa-glx=20.3.* \ libglib2.0-0=2.66.* \ - libopencv-dev=4.5.* \ - && rm -rf /var/lib/apt/lists/* + libopencv-dev=4.5.* # set working directory and path ARG APPDIR=/usr/src @@ -34,24 +35,24 @@ ENV PIP_USE_PEP517=1 # prepare for buildkit cache ARG PIP_CACHE_DIR=/var/cache/buildkit/pip +ARG CONTAINER_FLAVOR=cuda ENV PIP_CACHE_DIR ${PIP_CACHE_DIR} RUN mkdir -p ${PIP_CACHE_DIR} # Install dependencies RUN \ - --mount=type=cache,target=${PIP_CACHE_DIR} \ + --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ - apt-get update \ - && apt-get install \ - -yqq \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update \ + && apt-get install -y \ --no-install-recommends \ build-essential=12.9 \ gcc=4:10.2.* \ - python3-dev=3.9.* \ - && rm -rf /var/lib/apt/lists/* + python3-dev=3.9.* # create virtual environment -RUN --mount=type=cache,target=${PIP_CACHE_DIR} \ +RUN --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ python3 -m venv "${APPNAME}" \ --upgrade-deps @@ -62,8 +63,8 @@ COPY --link . . ARG PIP_EXTRA_INDEX_URL ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL} ARG PIP_PACKAGE=. 
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \ - "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE} +RUN --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ + "${APPNAME}/bin/pip" install ${PIP_PACKAGE} # build patchmatch RUN python3 -c "from patchmatch import patch_match" @@ -74,7 +75,7 @@ RUN python3 -c "from patchmatch import patch_match" FROM python-base AS runtime # setup environment -COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME} +COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME} ENV INVOKEAI_ROOT=/data ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only" diff --git a/docker/build.sh b/docker/build.sh index dc1a1dcc78..1ad51b387a 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -36,9 +36,10 @@ fi # Build Container DOCKER_BUILDKIT=1 docker build \ - --platform="${PLATFORM}" \ - --tag="${CONTAINER_IMAGE}" \ + --platform="${PLATFORM:-Linux/amd64}" \ + --tag="${CONTAINER_IMAGE:-invokeai}" \ + ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \ ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \ ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \ - --file="${DOCKERFILE}" \ + --file="${DOCKERFILE:-./Dockerfile}" \ .. diff --git a/docker/run.sh b/docker/run.sh index 5593faaa3e..f769976b1c 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -28,4 +28,4 @@ docker run \ --publish=9090:9090 \ --cap-add=sys_nice \ ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \ - "${CONTAINER_IMAGE}" ${1:+$@} + "${CONTAINER_IMAGE}" ${@:+$@} From 07a9062e1fcecec8abf97570633bc01c2f5c90c3 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 02:07:40 +0100 Subject: [PATCH 02/52] update .dockerignore and scripts --- .dockerignore | 8 ++++---- docker/build.sh | 11 ++++++----- docker/env.sh | 8 +++++++- docker/run.sh | 2 +- 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/.dockerignore b/.dockerignore index 429ca125c0..c248f6d8ed 100644 --- a/.dockerignore +++ b/.dockerignore @@ -11,13 +11,13 @@ # ignore frontend but whitelist dist invokeai/frontend/** -!invokeai/frontend/dist +!invokeai/frontend/dist/** # ignore invokeai/assets but whitelist invokeai/assets/web -invokeai/assets -!invokeai/assets/web +invokeai/assets/** +!invokeai/assets/web/*.png -# ignore python cache +# ignore python cache and build artifacts **/__pycache__ **/*.py[cod] **/*.egg-info diff --git a/docker/build.sh b/docker/build.sh index 1ad51b387a..1bc233f222 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -8,12 +8,12 @@ set -e # CPU: https://download.pytorch.org/whl/cpu # as found on https://pytorch.org/get-started/locally/ -SCRIPTDIR=$(dirname "$0") +SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}") cd "$SCRIPTDIR" || exit 1 source ./env.sh -DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile} +DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile} # print the settings echo -e "You are using these values:\n" @@ -21,9 +21,10 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}" echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}" echo -e "Volumename:\t\t${VOLUMENAME}" echo -e "Platform:\t\t${PLATFORM}" -echo -e "Registry:\t\t${CONTAINER_REGISTRY}" -echo -e "Repository:\t\t${CONTAINER_REPOSITORY}" +echo -e "Container Registry:\t${CONTAINER_REGISTRY}" +echo -e "Container Repository:\t${CONTAINER_REPOSITORY}" echo -e "Container Tag:\t\t${CONTAINER_TAG}" +echo -e "Container Flavor:\t${CONTAINER_FLAVOR}" echo -e "Container Image:\t${CONTAINER_IMAGE}\n" # Create docker volume @@ -41,5 +42,5 @@ DOCKER_BUILDKIT=1 docker build \ 
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \ ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \ ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \ - --file="${DOCKERFILE:-./Dockerfile}" \ + --file="${DOCKERFILE}" \ .. diff --git a/docker/env.sh b/docker/env.sh index d6b0699ce5..f6a7a2b206 100644 --- a/docker/env.sh +++ b/docker/env.sh @@ -1,6 +1,11 @@ #!/usr/bin/env bash if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then + # Activate virtual environment if not already activated + if [[ -z $VIRTUAL_ENV ]]; then + [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \ + && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" + fi # Decide which container flavor to build if not specified if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then # Check for CUDA and ROCm @@ -26,7 +31,8 @@ fi # Variables shared by build.sh and run.sh REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}" -VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}" +REPOSITORY_NAME="${REPOSITORY_NAME,,}" +VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}" ARCH="${ARCH-$(uname -m)}" PLATFORM="${PLATFORM-Linux/${ARCH}}" INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}" diff --git a/docker/run.sh b/docker/run.sh index f769976b1c..7d3b0990ab 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -4,7 +4,7 @@ set -e # How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container # IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!! -SCRIPTDIR=$(dirname "$0") +SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}") cd "$SCRIPTDIR" || exit 1 source ./env.sh From 75b919237b5dc7ad5226d06bfcfbd9dab5ddf18f Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 03:39:50 +0100 Subject: [PATCH 03/52] update `cache-from` --- .github/workflows/build-container.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 2ba9e588e2..769a026fc4 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -79,7 +79,9 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }} - cache-from: type=gha + cache-from: | + type=gha + ghcr.io/${{ github.repository }}:main-${{ matrix.flavor }} cache-to: type=gha,mode=max - name: Output image, digest and metadata to summary From ee4cb5fdc9b1676b22fcf4cf61d3c590e3ece4db Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 03:47:46 +0100 Subject: [PATCH 04/52] add id to `Build container` --- .github/workflows/build-container.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 769a026fc4..8e1d5f64e3 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -70,6 +70,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Build container + id: docker_build uses: docker/build-push-action@v4 with: context: . 
From f0212cd36174b375685d4cfe72979f576f332797 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 03:50:47 +0100 Subject: [PATCH 05/52] update Dockerfile --- docker/Dockerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 7146a67981..ad20c91270 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -25,7 +25,7 @@ RUN \ ARG APPDIR=/usr/src ARG APPNAME=InvokeAI WORKDIR ${APPDIR} -ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH +ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH ####################### ## build pyproject ## @@ -42,8 +42,8 @@ RUN mkdir -p ${PIP_CACHE_DIR} # Install dependencies RUN \ --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,from=python-base \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,from=python-base \ apt update \ && apt-get install -y \ --no-install-recommends \ @@ -75,7 +75,7 @@ RUN python3 -c "from patchmatch import patch_match" FROM python-base AS runtime # setup environment -COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME} +COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME} ENV INVOKEAI_ROOT=/data ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only" From 59486615ddfd28e76a2404ddd980759ac52570fc Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 04:29:44 +0100 Subject: [PATCH 06/52] update `build-container.yml` --- .github/workflows/build-container.yml | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 8e1d5f64e3..7b9abb3ca7 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -21,18 +21,15 @@ jobs: include: - flavor: amd pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2' - dockerfile: docker/Dockerfile - platforms: linux/amd64,linux/arm64 - flavor: cuda pip-extra-index-url: '' - dockerfile: docker/Dockerfile - platforms: linux/amd64,linux/arm64 - flavor: cpu pip-extra-index-url: 'https://download.pytorch.org/whl/cpu' - dockerfile: docker/Dockerfile - platforms: linux/amd64,linux/arm64 runs-on: ubuntu-latest name: ${{ matrix.flavor }} + env: + PLATFORMS: 'linux/amd64,linux/arm64' + DOCKERFILE: 'docker/Dockerfile' steps: - name: Checkout uses: actions/checkout@v3 @@ -59,7 +56,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 with: - platforms: ${{ matrix.platforms }} + platforms: ${{ env.PLATFORMS }} - name: Login to GitHub Container Registry if: github.event_name != 'pull_request' @@ -74,15 +71,15 @@ jobs: uses: docker/build-push-action@v4 with: context: . 
- file: ${{ matrix.dockerfile }} - platforms: ${{ matrix.platforms }} + file: ${{ env.DOCKERFILE }} + platforms: ${{ env.PLATFORMS }} push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }} cache-from: | type=gha - ghcr.io/${{ github.repository }}:main-${{ matrix.flavor }} + type=registry,ref=ghcr.io/${{ github.repository }}:main-${{ matrix.flavor }} cache-to: type=gha,mode=max - name: Output image, digest and metadata to summary From c52d11b24c0cc4ce82c161e41314d6be28e93473 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 04:58:44 +0100 Subject: [PATCH 07/52] optionally push to DockerHub --- .github/workflows/build-container.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 7b9abb3ca7..b43672332d 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -39,7 +39,9 @@ jobs: uses: docker/metadata-action@v4 with: github-token: ${{ secrets.GITHUB_TOKEN }} - images: ghcr.io/${{ github.repository }} + images: | + ghcr.io/${{ github.repository }} + ${{ vars.DOCKER_HUB }} tags: | type=ref,event=branch type=ref,event=tag @@ -66,6 +68,13 @@ jobs: username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Login to Docker Hub + if: github.event_name != 'pull_request' && vars.DOCKER_HUB != '' + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build container id: docker_build uses: docker/build-push-action@v4 From 7bc0f7cc6cb780b089104a25cce770e5197352c0 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 05:38:48 +0100 Subject: [PATCH 08/52] update Docker Hub description --- .github/workflows/build-container.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index b43672332d..bd03934e3d 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -52,6 +52,7 @@ jobs: flavor: | latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }} suffix=-${{ matrix.flavor }},onlatest=false + - name: Set up QEMU uses: docker/setup-qemu-action@v2 @@ -91,6 +92,15 @@ jobs: type=registry,ref=ghcr.io/${{ github.repository }}:main-${{ matrix.flavor }} cache-to: type=gha,mode=max + - name: Docker Hub Description + if: github.event_name != 'pull_request' && vars.DOCKER_HUB != '' + uses: peter-evans/dockerhub-description@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + repository: ${{ vars.DOCKER_HUB }} + short-description: ${{ github.event.repository.description }} + - name: Output image, digest and metadata to summary run: | { From 9150f9ef3cc3d0383e9aa6c7bb2be2942a94cd6c Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 05:41:40 +0100 Subject: [PATCH 09/52] move LABEL to top --- docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index ad20c91270..e46970f786 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -6,6 +6,8 @@ ARG PYTHON_VERSION=3.9 ################## FROM python:${PYTHON_VERSION}-slim AS python-base +LABEL org.opencontainers.image.authors="mauwii@outlook.de" + # prepare for buildkit cache RUN rm -f 
/etc/apt/apt.conf.d/docker-clean \ && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache @@ -83,5 +85,3 @@ ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only" ENTRYPOINT [ "invokeai" ] CMD [ "--web", "--host=0.0.0.0" ] VOLUME [ "/data" ] - -LABEL org.opencontainers.image.authors="mauwii@outlook.de" From 1d10d952b2585b019c5e51fd275903de3c629ff0 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 05:56:11 +0100 Subject: [PATCH 10/52] use cleartext DOCKERHUB_USERNAME --- .github/workflows/build-container.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index bd03934e3d..273182b8cf 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -41,7 +41,7 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} images: | ghcr.io/${{ github.repository }} - ${{ vars.DOCKER_HUB }} + ${{ vars.DOCKERHUB_REPOSITORY }} tags: | type=ref,event=branch type=ref,event=tag @@ -70,10 +70,10 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Login to Docker Hub - if: github.event_name != 'pull_request' && vars.DOCKER_HUB != '' + if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != '' uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} + username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build container @@ -93,12 +93,12 @@ jobs: cache-to: type=gha,mode=max - name: Docker Hub Description - if: github.event_name != 'pull_request' && vars.DOCKER_HUB != '' + if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != '' uses: peter-evans/dockerhub-description@v3 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} + username: ${{ vars.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - repository: ${{ vars.DOCKER_HUB }} + repository: ${{ vars.DOCKERHUB_REPOSITORY }} short-description: ${{ github.event.repository.description }} - name: Output image, digest and metadata to summary From b1612afff4f281cb437f25035c39741f3becea8d Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 05:58:43 +0100 Subject: [PATCH 11/52] update .dockerignore --- .dockerignore | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index c248f6d8ed..28c8b60ca4 100644 --- a/.dockerignore +++ b/.dockerignore @@ -19,5 +19,7 @@ invokeai/assets/** # ignore python cache and build artifacts **/__pycache__ -**/*.py[cod] +**/*.pyc +**/*.pyo +**/*.pyd **/*.egg-info From bde94347d3706b046a31958d94925822fbbe140f Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 06:00:40 +0100 Subject: [PATCH 12/52] don't use `--link`in COPY --- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index e46970f786..8c4455e213 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -59,7 +59,7 @@ RUN --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ --upgrade-deps # copy sources -COPY --link . . +COPY . . 
# install pyproject.toml ARG PIP_EXTRA_INDEX_URL From 17e19302298f266eb2bf838e163a14da488faea1 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 06:43:12 +0100 Subject: [PATCH 13/52] remove CONTAINER_FLAVOR build arg also disable currently unused PIP_PACKAGE build arg will start using it when problems with XFORMERS are sorted out --- docker/Dockerfile | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 8c4455e213..0507363f69 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -43,9 +43,9 @@ RUN mkdir -p ${PIP_CACHE_DIR} # Install dependencies RUN \ - --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ - --mount=type=cache,target=/var/cache/apt,sharing=locked,from=python-base \ - --mount=type=cache,target=/var/lib/apt,sharing=locked,from=python-base \ + --mount=type=cache,target=${PIP_CACHE_DIR} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt update \ && apt-get install -y \ --no-install-recommends \ @@ -54,7 +54,7 @@ RUN \ python3-dev=3.9.* # create virtual environment -RUN --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ +RUN --mount=type=cache,target=${PIP_CACHE_DIR} \ python3 -m venv "${APPNAME}" \ --upgrade-deps @@ -64,9 +64,9 @@ COPY . . # install pyproject.toml ARG PIP_EXTRA_INDEX_URL ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL} -ARG PIP_PACKAGE=. -RUN --mount=type=cache,target=${PIP_CACHE_DIR},id=pip-${CONTAINER_FLAVOR} \ - "${APPNAME}/bin/pip" install ${PIP_PACKAGE} +# ARG PIP_PACKAGE=. +RUN --mount=type=cache,target=${PIP_CACHE_DIR} \ + "${APPNAME}/bin/pip" install . # build patchmatch RUN python3 -c "from patchmatch import patch_match" From c2e11dfe8354d78c0f8eb69983445278ef2ba143 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 07:08:23 +0100 Subject: [PATCH 14/52] update build-container.yml - add long sha tag - update cache-from Dockerfile: - re-use `apt-get update` env.sh/build.sh: - rename platform to lowercase --- .github/workflows/build-container.yml | 3 ++- docker/Dockerfile | 4 ++-- docker/build.sh | 2 +- docker/env.sh | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 273182b8cf..646ed7c03e 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -48,6 +48,7 @@ jobs: type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} + type=sha,format=long type=sha,enable=true,prefix=sha-,format=short flavor: | latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }} @@ -89,7 +90,7 @@ jobs: build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }} cache-from: | type=gha - type=registry,ref=ghcr.io/${{ github.repository }}:main-${{ matrix.flavor }} + ghcr.io/${{ github.repository }}:main-${{ matrix.flavor }} cache-to: type=gha,mode=max - name: Docker Hub Description diff --git a/docker/Dockerfile b/docker/Dockerfile index 0507363f69..9b5a87f4a4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,7 +16,7 @@ RUN rm -f /etc/apt/apt.conf.d/docker-clean \ RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt update \ + apt-get update \ && apt-get install -y \ --no-install-recommends \ libgl1-mesa-glx=20.3.* \ @@ -46,7 +46,7 @@ RUN \ --mount=type=cache,target=${PIP_CACHE_DIR} \ 
--mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt update \ + apt-get update \ && apt-get install -y \ --no-install-recommends \ build-essential=12.9 \ diff --git a/docker/build.sh b/docker/build.sh index 1bc233f222..dabbd8378a 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -37,7 +37,7 @@ fi # Build Container DOCKER_BUILDKIT=1 docker build \ - --platform="${PLATFORM:-Linux/amd64}" \ + --platform="${PLATFORM:-linux/amd64}" \ --tag="${CONTAINER_IMAGE:-invokeai}" \ ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \ ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \ diff --git a/docker/env.sh b/docker/env.sh index f6a7a2b206..f09c629112 100644 --- a/docker/env.sh +++ b/docker/env.sh @@ -34,7 +34,7 @@ REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)" REPOSITORY_NAME="${REPOSITORY_NAME,,}" VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}" ARCH="${ARCH-$(uname -m)}" -PLATFORM="${PLATFORM-Linux/${ARCH}}" +PLATFORM="${PLATFORM-linux/${ARCH}}" INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}" CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}" CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}" From 42da4f57c213a48c5614d4ac04ab82fd9cfa1dcd Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 07:19:57 +0100 Subject: [PATCH 15/52] update .dockerignore --- .dockerignore | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/.dockerignore b/.dockerignore index 28c8b60ca4..81301571b8 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,23 +3,23 @@ !invokeai !ldm !pyproject.toml -!README.md # Guard against pulling in any models that might exist in the directory tree **/*.pt* **/*.ckpt # ignore frontend but whitelist dist -invokeai/frontend/** -!invokeai/frontend/dist/** +invokeai/frontend/ +!invokeai/frontend/dist/ # ignore invokeai/assets but whitelist invokeai/assets/web -invokeai/assets/** -!invokeai/assets/web/*.png +invokeai/assets/ +!invokeai/assets/web/ -# ignore python cache and build artifacts -**/__pycache__ -**/*.pyc -**/*.pyo -**/*.pyd -**/*.egg-info +# Byte-compiled / optimized / DLL files +**/__pycache__/ +**/*.py[cod] + +# Distribution / packaging +*.egg-info/ +*.egg From 7f41893da443ec5d7e895bd3e084d742889c4b4f Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 7 Feb 2023 07:40:18 +0100 Subject: [PATCH 16/52] set scope for caches --- .github/workflows/build-container.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 646ed7c03e..e2c5be27c2 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -88,10 +88,8 @@ jobs: tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }} - cache-from: | - type=gha - ghcr.io/${{ github.repository }}:main-${{ matrix.flavor }} - cache-to: type=gha,mode=max + cache-from: type=gha,scope=$GITHUB_REF_NAME-${{ matrix.flavor }} + cache-to: type=gha,mode=max,scope=$GITHUB_REF_NAME-${{ matrix.flavor }} - name: Docker Hub Description if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != '' From f92f62a91bef760a5bab3ec650cb9bb6da25a9e9 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 6 Feb 2023 09:35:23 -0500 Subject: [PATCH 17/52] enhance model_manager support for 
converting inpainting ckpt files Previously conversions of .ckpt and .safetensors files to diffusers models were failing with channel mismatch errors. This is corrected with this PR. - The model_manager convert_and_import() method now accepts the path to the checkpoint file's configuration file, using the parameter `original_config_file`. For inpainting files this should be set to the full path to `v1-inpainting-inference.yaml`. - If no configuration file is provided in the call, then the presence of an inpainting file will be inferred at the `ldm.ckpt_to_diffuser.convert_ckpt_to_diffUser()` level by looking for the string "inpaint" in the path. AUTO1111 does something similar to this, but it is brittle and not recommended. - This PR also changes the model manager model_names() method to return the model names in case folded sort order. --- ldm/invoke/CLI.py | 14 +- ldm/invoke/ckpt_to_diffuser.py | 15 +- ldm/invoke/model_manager.py | 842 ++++++++++++++++++--------------- 3 files changed, 494 insertions(+), 377 deletions(-) diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 8f971534f7..fd61c7c8bf 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -712,10 +712,12 @@ def _get_model_name_and_desc(model_manager,completer,model_name:str='',model_des def optimize_model(model_name_or_path:str, gen, opt, completer): manager = gen.model_manager ckpt_path = None + original_config_file = None if (model_info := manager.model_info(model_name_or_path)): if 'weights' in model_info: ckpt_path = Path(model_info['weights']) + original_config_file = Path(model_info['config']) model_name = model_name_or_path model_description = model_info['description'] else: @@ -723,12 +725,18 @@ def optimize_model(model_name_or_path:str, gen, opt, completer): return elif os.path.exists(model_name_or_path): ckpt_path = Path(model_name_or_path) - model_name,model_description = _get_model_name_and_desc( + model_name, model_description = _get_model_name_and_desc( manager, completer, ckpt_path.stem, f'Converted model {ckpt_path.stem}' ) + is_inpainting = input('Is this an inpainting model? [n] ').startswith(('y','Y')) + original_config_file = Path( + 'configs', + 'stable-diffusion', + 'v1-inpainting-inference.yaml' if is_inpainting else 'v1-inference.yaml' + ) else: print(f'** {model_name_or_path} is neither an existing model nor the path to a .ckpt file') return @@ -736,6 +744,9 @@ def optimize_model(model_name_or_path:str, gen, opt, completer): if not ckpt_path.is_absolute(): ckpt_path = Path(Globals.root,ckpt_path) + if original_config_file and not original_config_file.is_absolute(): + original_config_file = Path(Globals.root,original_config_file) + diffuser_path = Path(Globals.root, 'models',Globals.converted_ckpts_dir,model_name) if diffuser_path.exists(): print(f'** {model_name_or_path} is already optimized. Will not overwrite. 
If this is an error, please remove the directory {diffuser_path} and try again.') @@ -751,6 +762,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer): model_name=model_name, model_description=model_description, vae = vae, + original_config_file = original_config_file, commit_to_conf=opt.conf, ) if not new_config: diff --git a/ldm/invoke/ckpt_to_diffuser.py b/ldm/invoke/ckpt_to_diffuser.py index 8f4dc3961c..dece856bc9 100644 --- a/ldm/invoke/ckpt_to_diffuser.py +++ b/ldm/invoke/ckpt_to_diffuser.py @@ -22,7 +22,11 @@ import re import torch import warnings from pathlib import Path -from ldm.invoke.globals import Globals, global_cache_dir +from ldm.invoke.globals import ( + Globals, + global_cache_dir, + global_config_dir, + ) from safetensors.torch import load_file from typing import Union @@ -826,6 +830,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt( :param upcast_attention: Whether the attention computation should always be upcasted. This is necessary when running stable diffusion 2.1. ''' + + print(f'DEBUG: original_config_file={original_config_file}') with warnings.catch_warnings(): warnings.simplefilter('ignore') @@ -852,13 +858,16 @@ def load_pipeline_from_original_stable_diffusion_ckpt( key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024: - original_config_file = os.path.join(Globals.root,'configs','stable-diffusion','v2-inference-v.yaml') + original_config_file = global_config_dir() / 'stable-diffusion' / 'v2-inference-v.yaml' if global_step == 110000: # v2.1 needs to upcast attention upcast_attention = True + elif str(checkpoint_path).lower().find('inpaint') >= 0: # brittle - please pass original_config_file parameter! + print(f' | checkpoint has "inpaint" in name, assuming an inpainting model') + original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inpainting-inference.yaml' else: - original_config_file = os.path.join(Globals.root,'configs','stable-diffusion','v1-inference.yaml') + original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inference.yaml' original_config = OmegaConf.load(original_config_file) diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 69bfbd587d..3135931eea 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -1,9 +1,9 @@ -''' +""" Manage a cache of Stable Diffusion model files for fast switching. They are moved between GPU and CPU as necessary. If CPU memory falls below a preset minimum, the least recently used model will be cleared and loaded from disk when next needed. 
-''' +""" from __future__ import annotations import contextlib @@ -15,44 +15,51 @@ import sys import textwrap import time import warnings -import safetensors.torch from pathlib import Path from shutil import move, rmtree from typing import Any, Optional, Union -from huggingface_hub import scan_cache_dir -from ldm.util import download_with_progress_bar -import torch import safetensors +import safetensors.torch +import torch import transformers -from diffusers import AutoencoderKL, logging as dlogging -from diffusers.utils.logging import get_verbosity, set_verbosity, set_verbosity_error +from diffusers import AutoencoderKL +from diffusers import logging as dlogging +from diffusers.utils.logging import (get_verbosity, set_verbosity, + set_verbosity_error) +from huggingface_hub import scan_cache_dir from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from picklescan.scanner import scan_file_path -from ldm.invoke.generator.diffusers_pipeline import StableDiffusionGeneratorPipeline -from ldm.invoke.globals import Globals, global_models_dir, global_autoscan_dir, global_cache_dir -from ldm.util import instantiate_from_config, ask_user +from ldm.invoke.generator.diffusers_pipeline import \ + StableDiffusionGeneratorPipeline +from ldm.invoke.globals import (Globals, global_autoscan_dir, global_cache_dir, + global_models_dir) +from ldm.util import (ask_user, download_with_progress_bar, + instantiate_from_config) + +DEFAULT_MAX_MODELS = 2 +VAE_TO_REPO_ID = { # hack, see note in convert_and_import() + "vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse", +} -DEFAULT_MAX_MODELS=2 -VAE_TO_REPO_ID = { # hack, see note in convert_and_import() - 'vae-ft-mse-840000-ema-pruned': 'stabilityai/sd-vae-ft-mse', - } class ModelManager(object): - def __init__(self, - config:OmegaConf, - device_type:str='cpu', - precision:str='float16', - max_loaded_models=DEFAULT_MAX_MODELS): - ''' + def __init__( + self, + config: OmegaConf, + device_type: str = "cpu", + precision: str = "float16", + max_loaded_models=DEFAULT_MAX_MODELS, + ): + """ Initialize with the path to the models.yaml config file, the torch device type, and precision. The optional min_avail_mem argument specifies how much unused system (CPU) memory to preserve. The cache of models in RAM will grow until this value is approached. Default is 2G. - ''' + """ # prevent nasty-looking CLIP log message transformers.logging.set_verbosity_error() self.config = config @@ -63,104 +70,106 @@ class ModelManager(object): self.stack = [] # this is an LRU FIFO self.current_model = None - def valid_model(self, model_name:str)->bool: - ''' + def valid_model(self, model_name: str) -> bool: + """ Given a model name, returns True if it is a valid identifier. - ''' + """ return model_name in self.config - def get_model(self, model_name:str): - ''' + def get_model(self, model_name: str): + """ Given a model named identified in models.yaml, return the model object. If in RAM will load into GPU VRAM. If on disk, will load from there. - ''' + """ if not self.valid_model(model_name): - print(f'** "{model_name}" is not a known model name. Please check your models.yaml file') + print( + f'** "{model_name}" is not a known model name. 
Please check your models.yaml file' + ) return self.current_model if self.current_model != model_name: - if model_name not in self.models: # make room for a new one + if model_name not in self.models: # make room for a new one self._make_cache_room() self.offload_model(self.current_model) if model_name in self.models: - requested_model = self.models[model_name]['model'] - print(f'>> Retrieving model {model_name} from system RAM cache') - self.models[model_name]['model'] = self._model_from_cpu(requested_model) - width = self.models[model_name]['width'] - height = self.models[model_name]['height'] - hash = self.models[model_name]['hash'] + requested_model = self.models[model_name]["model"] + print(f">> Retrieving model {model_name} from system RAM cache") + self.models[model_name]["model"] = self._model_from_cpu(requested_model) + width = self.models[model_name]["width"] + height = self.models[model_name]["height"] + hash = self.models[model_name]["hash"] - else: # we're about to load a new model, so potentially offload the least recently used one + else: # we're about to load a new model, so potentially offload the least recently used one requested_model, width, height, hash = self._load_model(model_name) self.models[model_name] = { - 'model': requested_model, - 'width': width, - 'height': height, - 'hash': hash, + "model": requested_model, + "width": width, + "height": height, + "hash": hash, } self.current_model = model_name self._push_newest_model(model_name) return { - 'model':requested_model, - 'width':width, - 'height':height, - 'hash': hash + "model": requested_model, + "width": width, + "height": height, + "hash": hash, } def default_model(self) -> str | None: - ''' + """ Returns the name of the default model, or None if none is defined. - ''' + """ for model_name in self.config: - if self.config[model_name].get('default'): + if self.config[model_name].get("default"): return model_name - def set_default_model(self,model_name:str) -> None: - ''' + def set_default_model(self, model_name: str) -> None: + """ Set the default model. The change will not take effect until you call model_manager.commit() - ''' + """ assert model_name in self.model_names(), f"unknown model '{model_name}'" config = self.config for model in config: - config[model].pop('default',None) - config[model_name]['default'] = True + config[model].pop("default", None) + config[model_name]["default"] = True - def model_info(self, model_name:str)->dict: - ''' + def model_info(self, model_name: str) -> dict: + """ Given a model name returns the OmegaConf (dict-like) object describing it. - ''' + """ if model_name not in self.config: return None return self.config[model_name] - def model_names(self)->list[str]: - ''' + def model_names(self) -> list[str]: + """ Return a list consisting of all the names of models defined in models.yaml - ''' + """ return list(self.config.keys()) - def is_legacy(self,model_name:str)->bool: - ''' + def is_legacy(self, model_name: str) -> bool: + """ Return true if this is a legacy (.ckpt) model - ''' + """ # if we are converting legacy files automatically, then # there are no legacy ckpts! 
if Globals.ckpt_convert: return False info = self.model_info(model_name) - if 'weights' in info and info['weights'].endswith(('.ckpt','.safetensors')): + if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")): return True return False def list_models(self) -> dict: - ''' + """ Return a dict of models in the format: { model_name1: {'status': ('active'|'cached'|'not loaded'), 'description': description, @@ -171,132 +180,142 @@ class ModelManager(object): model_manager.model_info('model-name') to get the stanza for the model named 'model-name', and model_manager.config to get the full OmegaConf object derived from models.yaml - ''' + """ models = {} - for name in sorted(self.config): + for name in sorted(self.config, key=str.casefold): stanza = self.config[name] # don't include VAEs in listing (legacy style) - if 'config' in stanza and '/VAE/' in stanza['config']: + if "config" in stanza and "/VAE/" in stanza["config"]: continue models[name] = dict() - format = stanza.get('format','ckpt') # Determine Format + format = stanza.get("format", "ckpt") # Determine Format # Common Attribs - description = stanza.get('description', None) + description = stanza.get("description", None) if self.current_model == name: - status = 'active' + status = "active" elif name in self.models: - status = 'cached' + status = "cached" else: - status = 'not loaded' + status = "not loaded" models[name].update( - description = description, - format = format, - status = status, + description=description, + format=format, + status=status, ) # Checkpoint Config Parse - if format == 'ckpt': + if format == "ckpt": models[name].update( - config = str(stanza.get('config', None)), - weights = str(stanza.get('weights', None)), - vae = str(stanza.get('vae', None)), - width = str(stanza.get('width', 512)), - height = str(stanza.get('height', 512)), + config=str(stanza.get("config", None)), + weights=str(stanza.get("weights", None)), + vae=str(stanza.get("vae", None)), + width=str(stanza.get("width", 512)), + height=str(stanza.get("height", 512)), ) # Diffusers Config Parse - if (vae := stanza.get('vae',None)): - if isinstance(vae,DictConfig): + if vae := stanza.get("vae", None): + if isinstance(vae, DictConfig): vae = dict( - repo_id = str(vae.get('repo_id',None)), - path = str(vae.get('path',None)), - subfolder = str(vae.get('subfolder',None)) + repo_id=str(vae.get("repo_id", None)), + path=str(vae.get("path", None)), + subfolder=str(vae.get("subfolder", None)), ) - if format == 'diffusers': + if format == "diffusers": models[name].update( - vae = vae, - repo_id = str(stanza.get('repo_id', None)), - path = str(stanza.get('path',None)), + vae=vae, + repo_id=str(stanza.get("repo_id", None)), + path=str(stanza.get("path", None)), ) return models def print_models(self) -> None: - ''' + """ Print a table of models, their descriptions, and load status - ''' + """ models = self.list_models() for name in models: - if models[name]['format'] == 'vae': + if models[name]["format"] == "vae": continue line = f'{name:25s} {models[name]["status"]:>10s} {models[name]["format"]:10s} {models[name]["description"]}' - if models[name]['status'] == 'active': - line = f'\033[1m{line}\033[0m' + if models[name]["status"] == "active": + line = f"\033[1m{line}\033[0m" print(line) - def del_model(self, model_name:str, delete_files:bool=False) -> None: - ''' + def del_model(self, model_name: str, delete_files: bool = False) -> None: + """ Delete the named model. 
- ''' + """ omega = self.config if model_name not in omega: - print(f'** Unknown model {model_name}') + print(f"** Unknown model {model_name}") return # save these for use in deletion later conf = omega[model_name] - repo_id = conf.get('repo_id',None) - path = self._abs_path(conf.get('path',None)) - weights = self._abs_path(conf.get('weights',None)) + repo_id = conf.get("repo_id", None) + path = self._abs_path(conf.get("path", None)) + weights = self._abs_path(conf.get("weights", None)) del omega[model_name] if model_name in self.stack: self.stack.remove(model_name) if delete_files: if weights: - print(f'** deleting file {weights}') + print(f"** deleting file {weights}") Path(weights).unlink(missing_ok=True) elif path: - print(f'** deleting directory {path}') - rmtree(path,ignore_errors=True) + print(f"** deleting directory {path}") + rmtree(path, ignore_errors=True) elif repo_id: - print(f'** deleting the cached model directory for {repo_id}') + print(f"** deleting the cached model directory for {repo_id}") self._delete_model_from_cache(repo_id) - def add_model(self, model_name:str, model_attributes:dict, clobber:bool=False) -> None: - ''' + def add_model( + self, model_name: str, model_attributes: dict, clobber: bool = False + ) -> None: + """ Update the named model with a dictionary of attributes. Will fail with an assertion error if the name already exists. Pass clobber=True to overwrite. On a successful update, the config will be changed in memory and the method will return True. Will fail with an assertion error if provided attributes are incorrect or the model name is missing. - ''' + """ omega = self.config - assert 'format' in model_attributes, 'missing required field "format"' - if model_attributes['format']=='diffusers': - assert 'description' in model_attributes, 'required field "description" is missing' - assert 'path' in model_attributes or 'repo_id' in model_attributes,'model must have either the "path" or "repo_id" fields defined' + assert "format" in model_attributes, 'missing required field "format"' + if model_attributes["format"] == "diffusers": + assert ( + "description" in model_attributes + ), 'required field "description" is missing' + assert ( + "path" in model_attributes or "repo_id" in model_attributes + ), 'model must have either the "path" or "repo_id" fields defined' else: - for field in ('description','weights','height','width','config'): - assert field in model_attributes, f'required field {field} is missing' + for field in ("description", "weights", "height", "width", "config"): + assert field in model_attributes, f"required field {field} is missing" - assert (clobber or model_name not in omega), f'attempt to overwrite existing model definition "{model_name}"' + assert ( + clobber or model_name not in omega + ), f'attempt to overwrite existing model definition "{model_name}"' omega[model_name] = model_attributes - if 'weights' in omega[model_name]: - omega[model_name]['weights'].replace('\\','/') + if "weights" in omega[model_name]: + omega[model_name]["weights"].replace("\\", "/") if clobber: self._invalidate_cached_model(model_name) - def _load_model(self, model_name:str): + def _load_model(self, model_name: str): """Load and initialize the model from configuration variables passed at object creation time""" if model_name not in self.config: - print(f'"{model_name}" is not a known model name. Please check your models.yaml file') + print( + f'"{model_name}" is not a known model name. 
Please check your models.yaml file' + ) return mconfig = self.config[model_name] @@ -309,66 +328,76 @@ class ModelManager(object): tic = time.time() # this does the work - model_format = mconfig.get('format', 'ckpt') - if model_format == 'ckpt': + model_format = mconfig.get("format", "ckpt") + if model_format == "ckpt": weights = mconfig.weights - print(f'>> Loading {model_name} from {weights}') - model, width, height, model_hash = self._load_ckpt_model(model_name, mconfig) - elif model_format == 'diffusers': + print(f">> Loading {model_name} from {weights}") + model, width, height, model_hash = self._load_ckpt_model( + model_name, mconfig + ) + elif model_format == "diffusers": with warnings.catch_warnings(): - warnings.simplefilter('ignore') + warnings.simplefilter("ignore") model, width, height, model_hash = self._load_diffusers_model(mconfig) else: - raise NotImplementedError(f"Unknown model format {model_name}: {model_format}") + raise NotImplementedError( + f"Unknown model format {model_name}: {model_format}" + ) # usage statistics toc = time.time() - print('>> Model loaded in', '%4.2fs' % (toc - tic)) + print(">> Model loaded in", "%4.2fs" % (toc - tic)) if self._has_cuda(): print( - '>> Max VRAM used to load the model:', - '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9), - '\n>> Current VRAM usage:' - '%4.2fG' % (torch.cuda.memory_allocated() / 1e9), + ">> Max VRAM used to load the model:", + "%4.2fG" % (torch.cuda.max_memory_allocated() / 1e9), + "\n>> Current VRAM usage:" + "%4.2fG" % (torch.cuda.memory_allocated() / 1e9), ) return model, width, height, model_hash def _load_ckpt_model(self, model_name, mconfig): config = mconfig.config weights = mconfig.weights - vae = mconfig.get('vae') + vae = mconfig.get("vae") width = mconfig.width height = mconfig.height if not os.path.isabs(config): - config = os.path.join(Globals.root,config) + config = os.path.join(Globals.root, config) if not os.path.isabs(weights): - weights = os.path.normpath(os.path.join(Globals.root,weights)) + weights = os.path.normpath(os.path.join(Globals.root, weights)) # if converting automatically to diffusers, then we do the conversion and return # a diffusers pipeline if Globals.ckpt_convert: - print(f'>> Converting legacy checkpoint {model_name} into a diffusers model...') - from ldm.invoke.ckpt_to_diffuser import load_pipeline_from_original_stable_diffusion_ckpt + print( + f">> Converting legacy checkpoint {model_name} into a diffusers model..." 
+ ) + from ldm.invoke.ckpt_to_diffuser import \ + load_pipeline_from_original_stable_diffusion_ckpt + if vae_config := self._choose_diffusers_vae(model_name): vae = self._load_vae(vae_config) pipeline = load_pipeline_from_original_stable_diffusion_ckpt( - checkpoint_path = weights, - original_config_file = config, - vae = vae, + checkpoint_path=weights, + original_config_file=config, + vae=vae, return_generator_pipeline=True, ) return ( - pipeline.to(self.device).to(torch.float16 if self.precision == 'float16' else torch.float32), + pipeline.to(self.device).to( + torch.float16 if self.precision == "float16" else torch.float32 + ), width, height, - 'NOHASH' - ) + "NOHASH", + ) # scan model self.scan_model(model_name, weights) - print(f'>> Loading {model_name} from {weights}') + print(f">> Loading {model_name} from {weights}") # for usage statistics if self._has_cuda(): @@ -379,49 +408,53 @@ class ModelManager(object): # this does the work if not os.path.isabs(config): - config = os.path.join(Globals.root,config) + config = os.path.join(Globals.root, config) omega_config = OmegaConf.load(config) - with open(weights,'rb') as f: + with open(weights, "rb") as f: weight_bytes = f.read() model_hash = self._cached_sha256(weights, weight_bytes) sd = None - if weights.endswith('.safetensors'): + if weights.endswith(".safetensors"): sd = safetensors.torch.load(weight_bytes) else: - sd = torch.load(io.BytesIO(weight_bytes), map_location='cpu') + sd = torch.load(io.BytesIO(weight_bytes), map_location="cpu") del weight_bytes # merged models from auto11 merge board are flat for some reason - if 'state_dict' in sd: - sd = sd['state_dict'] + if "state_dict" in sd: + sd = sd["state_dict"] - print(' | Forcing garbage collection prior to loading new model') + print(" | Forcing garbage collection prior to loading new model") gc.collect() model = instantiate_from_config(omega_config.model) model.load_state_dict(sd, strict=False) - if self.precision == 'float16': - print(' | Using faster float16 precision') + if self.precision == "float16": + print(" | Using faster float16 precision") model = model.to(torch.float16) else: - print(' | Using more accurate float32 precision') + print(" | Using more accurate float32 precision") # look and load a matching vae file. Code borrowed from AUTOMATIC1111 modules/sd_models.py if vae: if not os.path.isabs(vae): - vae = os.path.normpath(os.path.join(Globals.root,vae)) + vae = os.path.normpath(os.path.join(Globals.root, vae)) if os.path.exists(vae): - print(f' | Loading VAE weights from: {vae}') + print(f" | Loading VAE weights from: {vae}") vae_ckpt = None vae_dict = None - if vae.endswith('.safetensors'): + if vae.endswith(".safetensors"): vae_ckpt = safetensors.torch.load_file(vae) vae_dict = {k: v for k, v in vae_ckpt.items() if k[0:4] != "loss"} else: vae_ckpt = torch.load(vae, map_location="cpu") - vae_dict = {k: v for k, v in vae_ckpt['state_dict'].items() if k[0:4] != "loss"} + vae_dict = { + k: v + for k, v in vae_ckpt["state_dict"].items() + if k[0:4] != "loss" + } model.first_stage_model.load_state_dict(vae_dict, strict=False) else: - print(f' | VAE file {vae} not found. Skipping.') + print(f" | VAE file {vae} not found. 
Skipping.") model.to(self.device) # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here @@ -435,41 +468,40 @@ class ModelManager(object): # usage statistics toc = time.time() - print('>> Model loaded in', '%4.2fs' % (toc - tic)) + print(">> Model loaded in", "%4.2fs" % (toc - tic)) if self._has_cuda(): print( - '>> Max VRAM used to load the model:', - '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9), - '\n>> Current VRAM usage:' - '%4.2fG' % (torch.cuda.memory_allocated() / 1e9), + ">> Max VRAM used to load the model:", + "%4.2fG" % (torch.cuda.max_memory_allocated() / 1e9), + "\n>> Current VRAM usage:" + "%4.2fG" % (torch.cuda.memory_allocated() / 1e9), ) return model, width, height, model_hash def _load_diffusers_model(self, mconfig): name_or_path = self.model_name_or_path(mconfig) - using_fp16 = self.precision == 'float16' + using_fp16 = self.precision == "float16" - print(f'>> Loading diffusers model from {name_or_path}') + print(f">> Loading diffusers model from {name_or_path}") if using_fp16: - print(' | Using faster float16 precision') + print(" | Using faster float16 precision") else: - print(' | Using more accurate float32 precision') + print(" | Using more accurate float32 precision") # TODO: scan weights maybe? pipeline_args: dict[str, Any] = dict( - safety_checker=None, - local_files_only=not Globals.internet_available + safety_checker=None, local_files_only=not Globals.internet_available ) - if 'vae' in mconfig and mconfig['vae'] is not None: - vae = self._load_vae(mconfig['vae']) - pipeline_args.update(vae=vae) - if not isinstance(name_or_path,Path): - pipeline_args.update(cache_dir=global_cache_dir('diffusers')) + if "vae" in mconfig and mconfig["vae"] is not None: + vae = self._load_vae(mconfig["vae"]) + pipeline_args.update(vae=vae) + if not isinstance(name_or_path, Path): + pipeline_args.update(cache_dir=global_cache_dir("diffusers")) if using_fp16: pipeline_args.update(torch_dtype=torch.float16) - fp_args_list = [{'revision':'fp16'},{}] + fp_args_list = [{"revision": "fp16"}, {}] else: fp_args_list = [{}] @@ -485,10 +517,12 @@ class ModelManager(object): **fp_args, ) except OSError as e: - if str(e).startswith('fp16 is not a valid'): + if str(e).startswith("fp16 is not a valid"): pass else: - print(f'** An unexpected error occurred while downloading the model: {e})') + print( + f"** An unexpected error occurred while downloading the model: {e})" + ) if pipeline: break @@ -503,77 +537,86 @@ class ModelManager(object): width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor height = width - print(f' | Default image dimensions = {width} x {height}') + print(f" | Default image dimensions = {width} x {height}") return pipeline, width, height, model_hash - def model_name_or_path(self, model_name:Union[str,DictConfig]) -> str | Path: - if isinstance(model_name,DictConfig) or isinstance(model_name,dict): + def model_name_or_path(self, model_name: Union[str, DictConfig]) -> str | Path: + if isinstance(model_name, DictConfig) or isinstance(model_name, dict): mconfig = model_name elif model_name in self.config: mconfig = self.config[model_name] else: - raise ValueError(f'"{model_name}" is not a known model name. Please check your models.yaml file') + raise ValueError( + f'"{model_name}" is not a known model name. 
Please check your models.yaml file' + ) - if 'path' in mconfig: - path = Path(mconfig['path']) + if "path" in mconfig: + path = Path(mconfig["path"]) if not path.is_absolute(): path = Path(Globals.root, path).resolve() return path - elif 'repo_id' in mconfig: - return mconfig['repo_id'] + elif "repo_id" in mconfig: + return mconfig["repo_id"] else: raise ValueError("Model config must specify either repo_id or path.") - def offload_model(self, model_name:str) -> None: - ''' + def offload_model(self, model_name: str) -> None: + """ Offload the indicated model to CPU. Will call _make_cache_room() to free space if needed. - ''' + """ if model_name not in self.models: return - print(f'>> Offloading {model_name} to CPU') - model = self.models[model_name]['model'] - self.models[model_name]['model'] = self._model_to_cpu(model) + print(f">> Offloading {model_name} to CPU") + model = self.models[model_name]["model"] + self.models[model_name]["model"] = self._model_to_cpu(model) gc.collect() if self._has_cuda(): torch.cuda.empty_cache() def scan_model(self, model_name, checkpoint): - ''' + """ Apply picklescanner to the indicated checkpoint and issue a warning and option to exit if an infected file is identified. - ''' + """ # scan model - print(f'>> Scanning Model: {model_name}') + print(f">> Scanning Model: {model_name}") scan_result = scan_file_path(checkpoint) if scan_result.infected_files != 0: if scan_result.infected_files == 1: - print(f'\n### Issues Found In Model: {scan_result.issues_count}') - print('### WARNING: The model you are trying to load seems to be infected.') - print('### For your safety, InvokeAI will not load this model.') - print('### Please use checkpoints from trusted sources.') + print(f"\n### Issues Found In Model: {scan_result.issues_count}") + print( + "### WARNING: The model you are trying to load seems to be infected." + ) + print("### For your safety, InvokeAI will not load this model.") + print("### Please use checkpoints from trusted sources.") print("### Exiting InvokeAI") sys.exit() else: - print('\n### WARNING: InvokeAI was unable to scan the model you are using.') - model_safe_check_fail = ask_user('Do you want to to continue loading the model?', ['y', 'n']) - if model_safe_check_fail.lower() != 'y': + print( + "\n### WARNING: InvokeAI was unable to scan the model you are using." + ) + model_safe_check_fail = ask_user( + "Do you want to to continue loading the model?", ["y", "n"] + ) + if model_safe_check_fail.lower() != "y": print("### Exiting InvokeAI") sys.exit() else: - print('>> Model scanned ok!') + print(">> Model scanned ok!") - def import_diffuser_model(self, - repo_or_path:Union[str,Path], - model_name:str=None, - description:str=None, - vae:dict=None, - commit_to_conf:Path=None, - )->bool: - ''' + def import_diffuser_model( + self, + repo_or_path: Union[str, Path], + model_name: str = None, + description: str = None, + vae: dict = None, + commit_to_conf: Path = None, + ) -> bool: + """ Attempts to install the indicated diffuser model and returns True if successful. "repo_or_path" can be either a repo-id or a path-like object corresponding to the @@ -583,15 +626,15 @@ class ModelManager(object): then these will be derived from the repo name. If you provide a commit_to_conf path to the configuration file, then the new entry will be committed to the models.yaml file. 
- ''' + """ model_name = model_name or Path(repo_or_path).stem - description = description or f'imported diffusers model {model_name}' + description = description or f"imported diffusers model {model_name}" new_config = dict( description=description, vae=vae, - format='diffusers', + format="diffusers", ) - if isinstance(repo_or_path,Path) and repo_or_path.exists(): + if isinstance(repo_or_path, Path) and repo_or_path.exists(): new_config.update(path=str(repo_or_path)) else: new_config.update(repo_id=repo_or_path) @@ -601,15 +644,16 @@ class ModelManager(object): self.commit(commit_to_conf) return True - def import_ckpt_model(self, - weights:Union[str,Path], - config:Union[str,Path]='configs/stable-diffusion/v1-inference.yaml', - vae:Union[str,Path]=None, - model_name:str=None, - model_description:str=None, - commit_to_conf:Path=None, - )->bool: - ''' + def import_ckpt_model( + self, + weights: Union[str, Path], + config: Union[str, Path] = "configs/stable-diffusion/v1-inference.yaml", + vae: Union[str, Path] = None, + model_name: str = None, + model_description: str = None, + commit_to_conf: Path = None, + ) -> bool: + """ Attempts to install the indicated ckpt file and returns True if successful. "weights" can be either a path-like object corresponding to a local .ckpt file @@ -625,9 +669,9 @@ class ModelManager(object): then these will be derived from the weight file name. If you provide a commit_to_conf path to the configuration file, then the new entry will be committed to the models.yaml file. - ''' - weights_path = self._resolve_path(weights,'models/ldm/stable-diffusion-v1') - config_path = self._resolve_path(config,'configs/stable-diffusion') + """ + weights_path = self._resolve_path(weights, "models/ldm/stable-diffusion-v1") + config_path = self._resolve_path(config, "configs/stable-diffusion") if weights_path is None or not weights_path.exists(): return False @@ -635,76 +679,88 @@ class ModelManager(object): return False model_name = model_name or Path(weights).stem - model_description = model_description or f'imported stable diffusion weights file {model_name}' + model_description = ( + model_description or f"imported stable diffusion weights file {model_name}" + ) new_config = dict( weights=str(weights_path), config=str(config_path), description=model_description, - format='ckpt', + format="ckpt", width=512, - height=512 + height=512, ) if vae: - new_config['vae'] = vae + new_config["vae"] = vae self.add_model(model_name, new_config, True) if commit_to_conf: self.commit(commit_to_conf) return True def autoconvert_weights( - self, - conf_path:Path, - weights_directory:Path=None, - dest_directory:Path=None, + self, + conf_path: Path, + weights_directory: Path = None, + dest_directory: Path = None, ): - ''' + """ Scan the indicated directory for .ckpt files, convert into diffuser models, and import. 
- ''' + """ weights_directory = weights_directory or global_autoscan_dir() - dest_directory = dest_directory or Path(global_models_dir(), Globals.converted_ckpts_dir) + dest_directory = dest_directory or Path( + global_models_dir(), Globals.converted_ckpts_dir + ) - print('>> Checking for unconverted .ckpt files in {weights_directory}') + print(">> Checking for unconverted .ckpt files in {weights_directory}") ckpt_files = dict() for root, dirs, files in os.walk(weights_directory): for f in files: - if not f.endswith('.ckpt'): + if not f.endswith(".ckpt"): continue basename = Path(f).stem - dest = Path(dest_directory,basename) + dest = Path(dest_directory, basename) if not dest.exists(): - ckpt_files[Path(root,f)]=dest + ckpt_files[Path(root, f)] = dest - if len(ckpt_files)==0: + if len(ckpt_files) == 0: return - print(f'>> New .ckpt file(s) found in {weights_directory}. Optimizing and importing...') + print( + f">> New .ckpt file(s) found in {weights_directory}. Optimizing and importing..." + ) for ckpt in ckpt_files: self.convert_and_import(ckpt, ckpt_files[ckpt]) self.commit(conf_path) - def convert_and_import(self, - ckpt_path:Path, - diffusers_path:Path, - model_name=None, - model_description=None, - vae= None, - commit_to_conf:Path=None, - )->dict: - ''' + def convert_and_import( + self, + ckpt_path: Path, + diffusers_path: Path, + model_name=None, + model_description=None, + vae=None, + original_config_file: Path = None, + commit_to_conf: Path = None, + ) -> dict: + """ Convert a legacy ckpt weights file to diffuser model and import into models.yaml. - ''' + """ new_config = None - from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser import transformers + + from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser + if diffusers_path.exists(): - print(f'ERROR: The path {str(diffusers_path)} already exists. Please move or remove it and try again.') + print( + f"ERROR: The path {str(diffusers_path)} already exists. Please move or remove it and try again." + ) return model_name = model_name or diffusers_path.name - model_description = model_description or 'Optimized version of {model_name}' - print(f'>> Optimizing {model_name} (30-60s)') + model_description = model_description or "Optimized version of {model_name}" + print(f">> Optimizing {model_name} (30-60s)") try: # By passing the specified VAE too the conversion function, the autoencoder # will be built into the model rather than tacked on afterward via the config file @@ -712,31 +768,35 @@ class ModelManager(object): convert_ckpt_to_diffuser( ckpt_path, diffusers_path, - extract_ema = True, - vae = vae_model, + extract_ema=True, + original_config_file=original_config_file, + vae=vae_model, ) - print(f' | Success. Optimized model is now located at {str(diffusers_path)}') - print(f' | Writing new config file entry for {model_name}') + print( + f" | Success. 
Optimized model is now located at {str(diffusers_path)}" + ) + print(f" | Writing new config file entry for {model_name}") new_config = dict( path=str(diffusers_path), description=model_description, - format='diffusers', + format="diffusers", ) if model_name in self.config: self.del_model(model_name) self.add_model(model_name, new_config, True) if commit_to_conf: self.commit(commit_to_conf) - print('>> Conversion succeeded') + print(">> Conversion succeeded") except Exception as e: - print(f'** Conversion failed: {str(e)}') + print(f"** Conversion failed: {str(e)}") + print("** If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)") return new_config def search_models(self, search_folder): - print(f'>> Finding Models In: {search_folder}') - models_folder_ckpt = Path(search_folder).glob('**/*.ckpt') - models_folder_safetensors = Path(search_folder).glob('**/*.safetensors') + print(f">> Finding Models In: {search_folder}") + models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") + models_folder_safetensors = Path(search_folder).glob("**/*.safetensors") ckpt_files = [x for x in models_folder_ckpt if x.is_file()] safetensor_files = [x for x in models_folder_safetensors if x.is_file] @@ -745,15 +805,15 @@ class ModelManager(object): found_models = [] for file in files: - found_models.append({ - 'name': file.stem, - 'location': str(file.resolve()).replace('\\', '/') - }) + found_models.append( + {"name": file.stem, "location": str(file.resolve()).replace("\\", "/")} + ) return search_folder, found_models - def _choose_diffusers_vae(self, model_name:str, vae:str=None)->Union[dict,str]: - + def _choose_diffusers_vae( + self, model_name: str, vae: str = None + ) -> Union[dict, str]: # In the event that the original entry is using a custom ckpt VAE, we try to # map that VAE onto a diffuser VAE using a hard-coded dictionary. # I would prefer to do this differently: We load the ckpt model into memory, swap the @@ -761,49 +821,65 @@ class ModelManager(object): # VAE is built into the model. However, when I tried this I got obscure key errors. 
if vae: return vae - if model_name in self.config and (vae_ckpt_path := self.model_info(model_name).get('vae',None)): + if model_name in self.config and ( + vae_ckpt_path := self.model_info(model_name).get("vae", None) + ): vae_basename = Path(vae_ckpt_path).stem diffusers_vae = None - if (diffusers_vae := VAE_TO_REPO_ID.get(vae_basename,None)): - print(f'>> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version') - vae = {'repo_id': diffusers_vae} + if diffusers_vae := VAE_TO_REPO_ID.get(vae_basename, None): + print( + f">> {vae_basename} VAE corresponds to known {diffusers_vae} diffusers version" + ) + vae = {"repo_id": diffusers_vae} else: - print(f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown') - print('** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config') - vae = {'repo_id': 'stabilityai/sd-vae-ft-mse'} + print( + f'** Custom VAE "{vae_basename}" found, but corresponding diffusers model unknown' + ) + print( + '** Using "stabilityai/sd-vae-ft-mse"; If this isn\'t right, please edit the model config' + ) + vae = {"repo_id": "stabilityai/sd-vae-ft-mse"} return vae def _make_cache_room(self) -> None: num_loaded_models = len(self.models) if num_loaded_models >= self.max_loaded_models: least_recent_model = self._pop_oldest_model() - print(f'>> Cache limit (max={self.max_loaded_models}) reached. Purging {least_recent_model}') + print( + f">> Cache limit (max={self.max_loaded_models}) reached. Purging {least_recent_model}" + ) if least_recent_model is not None: del self.models[least_recent_model] gc.collect() def print_vram_usage(self) -> None: if self._has_cuda: - print('>> Current VRAM usage: ','%4.2fG' % (torch.cuda.memory_allocated() / 1e9)) + print( + ">> Current VRAM usage: ", + "%4.2fG" % (torch.cuda.memory_allocated() / 1e9), + ) - def commit(self,config_file_path:str) -> None: - ''' + def commit(self, config_file_path: str) -> None: + """ Write current configuration out to the indicated file. - ''' + """ yaml_str = OmegaConf.to_yaml(self.config) if not os.path.isabs(config_file_path): - config_file_path = os.path.normpath(os.path.join(Globals.root,config_file_path)) - tmpfile = os.path.join(os.path.dirname(config_file_path),'new_config.tmp') - with open(tmpfile, 'w', encoding="utf-8") as outfile: + config_file_path = os.path.normpath( + os.path.join(Globals.root, config_file_path) + ) + tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp") + with open(tmpfile, "w", encoding="utf-8") as outfile: outfile.write(self.preamble()) outfile.write(yaml_str) - os.replace(tmpfile,config_file_path) + os.replace(tmpfile, config_file_path) def preamble(self) -> str: - ''' + """ Returns the preamble for the config file. - ''' - return textwrap.dedent('''\ + """ + return textwrap.dedent( + """\ # This file describes the alternative machine learning models # available to InvokeAI script. # @@ -811,42 +887,49 @@ class ModelManager(object): # model requires a model config file, a weights file, # and the width and height of the images it # was trained on. - ''') + """ + ) @classmethod def migrate_models(cls): - ''' + """ Migrate the ~/invokeai/models directory from the legacy format used through 2.2.5 to the 2.3.0 "diffusers" version. This should be a one-time operation, called at script startup time. 
- ''' + """ # Three transformer models to check: bert, clip and safety checker legacy_locations = [ - Path('CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker'), - Path('bert-base-uncased/models--bert-base-uncased'), - Path('openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14') + Path( + "CompVis/stable-diffusion-safety-checker/models--CompVis--stable-diffusion-safety-checker" + ), + Path("bert-base-uncased/models--bert-base-uncased"), + Path( + "openai/clip-vit-large-patch14/models--openai--clip-vit-large-patch14" + ), ] - models_dir = Path(Globals.root,'models') + models_dir = Path(Globals.root, "models") legacy_layout = False for model in legacy_locations: - legacy_layout = legacy_layout or Path(models_dir,model).exists() + legacy_layout = legacy_layout or Path(models_dir, model).exists() if not legacy_layout: return - print('** Legacy version <= 2.2.5 model directory layout detected. Reorganizing.') - print('** This is a quick one-time operation.') + print( + "** Legacy version <= 2.2.5 model directory layout detected. Reorganizing." + ) + print("** This is a quick one-time operation.") # transformer files get moved into the hub directory if cls._is_huggingface_hub_directory_present(): - hub = global_cache_dir('hub') + hub = global_cache_dir("hub") else: - hub = models_dir / 'hub' + hub = models_dir / "hub" os.makedirs(hub, exist_ok=True) for model in legacy_locations: source = models_dir / model dest = hub / model.stem - print(f'** {source} => {dest}') + print(f"** {source} => {dest}") if source.exists(): if dest.exists(): rmtree(source) @@ -855,183 +938,192 @@ class ModelManager(object): # anything else gets moved into the diffusers directory if cls._is_huggingface_hub_directory_present(): - diffusers = global_cache_dir('diffusers') + diffusers = global_cache_dir("diffusers") else: - diffusers = models_dir / 'diffusers' + diffusers = models_dir / "diffusers" os.makedirs(diffusers, exist_ok=True) for root, dirs, _ in os.walk(models_dir, topdown=False): for dir in dirs: - full_path = Path(root,dir) + full_path = Path(root, dir) if full_path.is_relative_to(hub) or full_path.is_relative_to(diffusers): continue - if Path(dir).match('models--*--*'): + if Path(dir).match("models--*--*"): dest = diffusers / dir - print(f'** {full_path} => {dest}') + print(f"** {full_path} => {dest}") if dest.exists(): rmtree(full_path) else: - move(full_path,dest) + move(full_path, dest) # now clean up by removing any empty directories - empty = [root for root, dirs, files, in os.walk(models_dir) if not len(dirs) and not len(files)] + empty = [ + root + for root, dirs, files, in os.walk(models_dir) + if not len(dirs) and not len(files) + ] for d in empty: os.rmdir(d) - print('** Migration is done. Continuing...') + print("** Migration is done. 
Continuing...") - - def _resolve_path(self, source: Union[str, Path], dest_directory: str) -> Optional[Path]: + def _resolve_path( + self, source: Union[str, Path], dest_directory: str + ) -> Optional[Path]: resolved_path = None - if str(source).startswith(('http:','https:','ftp:')): + if str(source).startswith(("http:", "https:", "ftp:")): basename = os.path.basename(source) if not os.path.isabs(dest_directory): - dest_directory = os.path.join(Globals.root,dest_directory) - dest = os.path.join(dest_directory,basename) + dest_directory = os.path.join(Globals.root, dest_directory) + dest = os.path.join(dest_directory, basename) if download_with_progress_bar(str(source), Path(dest)): resolved_path = Path(dest) else: if not os.path.isabs(source): - source = os.path.join(Globals.root,source) + source = os.path.join(Globals.root, source) resolved_path = Path(source) return resolved_path - def _invalidate_cached_model(self,model_name:str) -> None: + def _invalidate_cached_model(self, model_name: str) -> None: self.offload_model(model_name) if model_name in self.stack: self.stack.remove(model_name) - self.models.pop(model_name,None) + self.models.pop(model_name, None) - def _model_to_cpu(self,model): - if self.device == 'cpu': + def _model_to_cpu(self, model): + if self.device == "cpu": return model # diffusers really really doesn't like us moving a float16 model onto CPU verbosity = get_verbosity() set_verbosity_error() - model.cond_stage_model.device = 'cpu' - model.to('cpu') + model.cond_stage_model.device = "cpu" + model.to("cpu") set_verbosity(verbosity) - for submodel in ('first_stage_model','cond_stage_model','model'): + for submodel in ("first_stage_model", "cond_stage_model", "model"): try: - getattr(model,submodel).to('cpu') + getattr(model, submodel).to("cpu") except AttributeError: pass return model - def _model_from_cpu(self,model): - if self.device == 'cpu': + def _model_from_cpu(self, model): + if self.device == "cpu": return model model.to(self.device) model.cond_stage_model.device = self.device - for submodel in ('first_stage_model','cond_stage_model','model'): + for submodel in ("first_stage_model", "cond_stage_model", "model"): try: - getattr(model,submodel).to(self.device) + getattr(model, submodel).to(self.device) except AttributeError: pass return model def _pop_oldest_model(self): - ''' + """ Remove the first element of the FIFO, which ought to be the least recently accessed model. Do not pop the last one, because it is in active use! - ''' + """ return self.stack.pop(0) - def _push_newest_model(self,model_name:str) -> None: - ''' + def _push_newest_model(self, model_name: str) -> None: + """ Maintain a simple FIFO. First element is always the least recent, and last element is always the most recent. 
- ''' + """ with contextlib.suppress(ValueError): self.stack.remove(model_name) self.stack.append(model_name) def _has_cuda(self) -> bool: - return self.device.type == 'cuda' + return self.device.type == "cuda" - def _diffuser_sha256(self,name_or_path:Union[str, Path],chunksize=4096)->Union[str,bytes]: + def _diffuser_sha256( + self, name_or_path: Union[str, Path], chunksize=4096 + ) -> Union[str, bytes]: path = None - if isinstance(name_or_path,Path): - path = name_or_path + if isinstance(name_or_path, Path): + path = name_or_path else: - owner,repo = name_or_path.split('/') - path = Path(global_cache_dir('diffusers') / f'models--{owner}--{repo}') + owner, repo = name_or_path.split("/") + path = Path(global_cache_dir("diffusers") / f"models--{owner}--{repo}") if not path.exists(): return None - hashpath = path / 'checksum.sha256' + hashpath = path / "checksum.sha256" if hashpath.exists() and path.stat().st_mtime <= hashpath.stat().st_mtime: with open(hashpath) as f: hash = f.read() return hash - print(' | Calculating sha256 hash of model files') + print(" | Calculating sha256 hash of model files") tic = time.time() sha = hashlib.sha256() count = 0 for root, dirs, files in os.walk(path, followlinks=False): for name in files: count += 1 - with open(os.path.join(root,name),'rb') as f: + with open(os.path.join(root, name), "rb") as f: while chunk := f.read(chunksize): sha.update(chunk) hash = sha.hexdigest() toc = time.time() - print(f' | sha256 = {hash} ({count} files hashed in','%4.2fs)' % (toc - tic)) - with open(hashpath,'w') as f: + print(f" | sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic)) + with open(hashpath, "w") as f: f.write(hash) return hash - def _cached_sha256(self,path,data) -> Union[str, bytes]: - dirname = os.path.dirname(path) - basename = os.path.basename(path) - base, _ = os.path.splitext(basename) - hashpath = os.path.join(dirname,base+'.sha256') + def _cached_sha256(self, path, data) -> Union[str, bytes]: + dirname = os.path.dirname(path) + basename = os.path.basename(path) + base, _ = os.path.splitext(basename) + hashpath = os.path.join(dirname, base + ".sha256") - if os.path.exists(hashpath) and os.path.getmtime(path) <= os.path.getmtime(hashpath): + if os.path.exists(hashpath) and os.path.getmtime(path) <= os.path.getmtime( + hashpath + ): with open(hashpath) as f: hash = f.read() return hash - print(' | Calculating sha256 hash of weights file') + print(" | Calculating sha256 hash of weights file") tic = time.time() sha = hashlib.sha256() sha.update(data) hash = sha.hexdigest() toc = time.time() - print(f'>> sha256 = {hash}','(%4.2fs)' % (toc - tic)) + print(f">> sha256 = {hash}", "(%4.2fs)" % (toc - tic)) - with open(hashpath,'w') as f: + with open(hashpath, "w") as f: f.write(hash) return hash - def _load_vae(self, vae_config)->AutoencoderKL: + def _load_vae(self, vae_config) -> AutoencoderKL: vae_args = {} name_or_path = self.model_name_or_path(vae_config) - using_fp16 = self.precision == 'float16' + using_fp16 = self.precision == "float16" vae_args.update( - cache_dir=global_cache_dir('diffusers'), + cache_dir=global_cache_dir("diffusers"), local_files_only=not Globals.internet_available, ) - print(f' | Loading diffusers VAE from {name_or_path}') + print(f" | Loading diffusers VAE from {name_or_path}") if using_fp16: vae_args.update(torch_dtype=torch.float16) - fp_args_list = [{'revision':'fp16'},{}] + fp_args_list = [{"revision": "fp16"}, {}] else: - print(' | Using more accurate float32 precision') + print(" | Using more accurate float32 
precision") fp_args_list = [{}] vae = None deferred_error = None # A VAE may be in a subfolder of a model's repository. - if 'subfolder' in vae_config: - vae_args['subfolder'] = vae_config['subfolder'] + if "subfolder" in vae_config: + vae_args["subfolder"] = vae_config["subfolder"] for fp_args in fp_args_list: # At some point we might need to be able to use different classes here? But for now I think @@ -1039,7 +1131,7 @@ class ModelManager(object): try: vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args) except OSError as e: - if str(e).startswith('fp16 is not a valid'): + if str(e).startswith("fp16 is not a valid"): pass else: deferred_error = e @@ -1047,31 +1139,35 @@ class ModelManager(object): break if not vae and deferred_error: - print(f'** Could not load VAE {name_or_path}: {str(deferred_error)}') + print(f"** Could not load VAE {name_or_path}: {str(deferred_error)}") return vae @staticmethod def _delete_model_from_cache(repo_id): - cache_info = scan_cache_dir(global_cache_dir('diffusers')) + cache_info = scan_cache_dir(global_cache_dir("diffusers")) # I'm sure there is a way to do this with comprehensions # but the code quickly became incomprehensible! hashes_to_delete = set() for repo in cache_info.repos: - if repo.repo_id==repo_id: + if repo.repo_id == repo_id: for revision in repo.revisions: hashes_to_delete.add(revision.commit_hash) strategy = cache_info.delete_revisions(*hashes_to_delete) - print(f'** deletion of this model is expected to free {strategy.expected_freed_size_str}') + print( + f"** deletion of this model is expected to free {strategy.expected_freed_size_str}" + ) strategy.execute() @staticmethod - def _abs_path(path:Union(str,Path))->Path: + def _abs_path(path: Union(str, Path)) -> Path: if path is None or Path(path).is_absolute(): return path - return Path(Globals.root,path).resolve() + return Path(Globals.root, path).resolve() @staticmethod def _is_huggingface_hub_directory_present() -> bool: - return os.getenv('HF_HOME') is not None or os.getenv('XDG_CACHE_HOME') is not None + return ( + os.getenv("HF_HOME") is not None or os.getenv("XDG_CACHE_HOME") is not None + ) From 511df2963be36a97c52356141d4b1fc948ee1794 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 6 Feb 2023 09:45:21 -0500 Subject: [PATCH 18/52] remove debugging statement --- ldm/invoke/ckpt_to_diffuser.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ldm/invoke/ckpt_to_diffuser.py b/ldm/invoke/ckpt_to_diffuser.py index dece856bc9..895484828f 100644 --- a/ldm/invoke/ckpt_to_diffuser.py +++ b/ldm/invoke/ckpt_to_diffuser.py @@ -831,8 +831,6 @@ def load_pipeline_from_original_stable_diffusion_ckpt( running stable diffusion 2.1. ''' - print(f'DEBUG: original_config_file={original_config_file}') - with warnings.catch_warnings(): warnings.simplefilter('ignore') verbosity = dlogging.get_verbosity() From f80a64a0f44b924a60ad40c2822d3e6ddd3a03d4 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 4 Feb 2023 11:32:22 +1100 Subject: [PATCH 19/52] Reorganises internal state `options` slice was huge and managed a mix of generation parameters and general app settings. It has been split up: - Generation parameters are now in `generationSlice`. - Postprocessing parameters are now in `postprocessingSlice` - UI related things are now in `uiSlice` There is probably more to be done, like `gallerySlice` perhaps should only manage internal gallery state, and not if the gallery is displayed. 
Full-slice selectors have been made for each slice. Other organisational tweaks. --- .../dist/assets/index-legacy-35973932.js | 52 + .../dist/assets/index-legacy-4add591a.js | 4 + invokeai/frontend/dist/index.html | 9 + .../locales/{options => parameters}/de.json | 0 .../{options => parameters}/en-US.json | 0 .../locales/{options => parameters}/en.json | 2 +- .../locales/{options => parameters}/es.json | 0 .../locales/{options => parameters}/fr.json | 0 .../locales/{options => parameters}/it.json | 0 .../locales/{options => parameters}/ja.json | 0 .../locales/{options => parameters}/nl.json | 0 .../locales/{options => parameters}/pl.json | 0 .../locales/{options => parameters}/pt.json | 0 .../{options => parameters}/pt_br.json | 0 .../locales/{options => parameters}/ru.json | 0 .../locales/{options => parameters}/ua.json | 0 .../{options => parameters}/zh_cn.json | 0 invokeai/frontend/src/app/App.tsx | 8 +- invokeai/frontend/src/app/invokeai.d.ts | 2 +- .../src/app/selectors/readinessSelector.ts | 20 +- invokeai/frontend/src/app/socketio/actions.ts | 2 +- .../frontend/src/app/socketio/emitters.ts | 22 +- .../frontend/src/app/socketio/listeners.ts | 13 +- invokeai/frontend/src/app/store.ts | 14 +- .../src/common/components/GuidePopover.tsx | 8 +- .../src/common/components/ImageUploader.tsx | 4 +- .../src/common/util/parameterTranslation.ts | 39 +- .../canvas/components/IAICanvasResizer.tsx | 2 +- .../IAICanvasToolbar/IAICanvasRedoButton.tsx | 2 +- .../IAICanvasToolbar/IAICanvasUndoButton.tsx | 2 +- .../features/canvas/hooks/useCanvasHotkeys.ts | 2 +- .../canvas/hooks/useCanvasMouseDown.ts | 2 +- .../canvas/hooks/useCanvasMouseMove.ts | 2 +- .../features/canvas/hooks/useCanvasMouseUp.ts | 2 +- .../features/canvas/store/canvasSelectors.ts | 2 +- .../components/CurrentImageButtons.tsx | 168 +- .../components/CurrentImageDisplay.tsx | 18 +- .../components/CurrentImagePreview.tsx | 10 +- .../gallery/components/DeleteImageModal.tsx | 11 +- .../gallery/components/HoverableImage.tsx | 41 +- .../gallery/components/ImageGallery.tsx | 8 +- .../ImageMetadataViewer.tsx | 18 +- .../gallery/hooks/useGetImageByUuid.ts | 2 +- ...ySliceSelectors.ts => gallerySelectors.ts} | 41 +- .../features/gallery/store/gallerySlice.ts | 3 +- .../gallery/store/thunks/uploadImage.ts | 4 +- .../features/lightbox/components/Lightbox.tsx | 8 +- .../lightbox/store/lightboxSelectors.ts | 13 + .../features/lightbox/store/lightboxSlice.ts | 26 + .../Output/ImageToImageOutputOptions.tsx | 12 - .../AdvancedOptions/Output/OutputOptions.tsx | 14 - .../MainAdvancedOptionsCheckbox.tsx | 24 - .../options/store/optionsSelectors.ts | 32 - .../AccordionItems/AdvancedSettings.scss | 8 +- .../AccordionItems/InvokeAccordionItem.tsx | 6 +- .../BoundingBox}/BoundingBoxSettings.scss | 0 .../BoundingBox}/BoundingBoxSettings.tsx | 6 +- .../Canvas/InfillAndScalingSettings.tsx} | 24 +- .../Canvas/SeamCorrection}/SeamBlur.tsx | 9 +- .../SeamCorrectionSettings.tsx} | 4 +- .../Canvas/SeamCorrection}/SeamSize.tsx | 6 +- .../Canvas/SeamCorrection}/SeamSteps.tsx | 6 +- .../Canvas/SeamCorrection}/SeamStrength.tsx | 6 +- .../FaceRestore/FaceRestoreSettings.tsx} | 57 +- .../FaceRestore/FaceRestoreToggle.tsx | 4 +- .../ImageToImage/ImageFit.tsx | 6 +- .../ImageToImage/ImageToImageStrength.tsx | 6 +- .../Output/HiresSettings.tsx} | 42 +- .../Output/ImageToImageOutputSettings.tsx | 12 + .../Output/OutputSettings.tsx | 14 + .../Output/SeamlessSettings.tsx} | 10 +- .../AdvancedParameters}/Seed/Perlin.tsx | 6 +- .../Seed/RandomizeSeed.tsx | 6 +- 
.../AdvancedParameters}/Seed/Seed.tsx | 10 +- .../AdvancedParameters/Seed/SeedSettings.tsx} | 4 +- .../AdvancedParameters}/Seed/ShuffleSeed.tsx | 6 +- .../AdvancedParameters}/Seed/Threshold.tsx | 6 +- .../Upscale/UpscaleSettings.scss} | 2 +- .../Upscale/UpscaleSettings.tsx} | 48 +- .../Upscale/UpscaleToggle.tsx | 4 +- .../Variations/GenerateVariations.tsx | 4 +- .../Variations/SeedWeights.tsx | 8 +- .../Variations/VariationAmount.tsx | 8 +- .../Variations/VariationsSettings.tsx} | 4 +- .../MainParameters}/MainCFGScale.tsx | 8 +- .../components/MainParameters}/MainHeight.tsx | 10 +- .../MainParameters}/MainIterations.tsx | 14 +- .../MainParameters/MainParameters.scss} | 15 +- .../MainParameters/MainParameters.tsx} | 10 +- .../MainParameters}/MainSampler.tsx | 13 +- .../components/MainParameters}/MainSteps.tsx | 8 +- .../components/MainParameters}/MainWidth.tsx | 10 +- .../components/ParametersAccordion.tsx} | 14 +- .../ProcessButtons/CancelButton.tsx | 8 +- .../ProcessButtons/InvokeButton.tsx | 8 +- .../components/ProcessButtons/Loopback.tsx | 15 +- .../ProcessButtons/ProcessButtons.scss | 0 .../ProcessButtons/ProcessButtons.tsx | 2 +- .../PromptInput/NegativePromptInput.tsx | 12 +- .../components/PromptInput/PromptInput.scss | 0 .../components/PromptInput/PromptInput.tsx | 12 +- .../parameters/store/generationSelectors.ts | 17 + .../store/generationSlice.ts} | 183 +- .../store/postprocessingSelectors.ts | 3 + .../parameters/store/postprocessingSlice.ts | 94 + .../features/system/components/Console.tsx | 10 +- .../ModelManager/AddCheckpointModel.tsx | 2 +- .../ModelManager/AddDiffusersModel.tsx | 2 +- .../components/ModelManager/AddModel.tsx | 4 +- .../components/ModelManager/ModelList.tsx | 4 +- .../components/ModelManager/SearchModels.tsx | 4 +- .../system/components/ProgressBar.tsx | 8 +- .../SettingsModal/SettingsModal.tsx | 12 +- .../system/components/StatusIndicator.tsx | 8 +- .../system/components/ThemeChanger.tsx | 4 +- .../features/system/store/systemSelectors.ts | 9 +- .../ImageToImage/ImageToImagePanel.tsx | 86 - .../TextToImage/TextToImagePanel.tsx | 64 - .../UnifiedCanvas/UnifiedCanvasPanel.tsx | 66 - .../components/FloatingButton.scss | 0 .../components/FloatingGalleryButton.tsx | 18 +- .../FloatingParametersPanelButtons.tsx} | 72 +- .../components/ImageToImage/ImageToImage.scss | 0 .../ImageToImage/ImageToImageDisplay.tsx | 2 +- .../ImageToImage/ImageToImagePanel.tsx | 86 + .../ImageToImage/InitImagePreview.tsx | 6 +- .../ImageToImage/InitialImageOverlay.tsx | 2 +- .../components/ImageToImage/index.tsx | 2 +- .../components/InvokeParametersPanel.scss} | 16 +- .../components/InvokeParametersPanel.tsx} | 99 +- .../{tabs => ui}/components/InvokeTabs.scss | 0 .../{tabs => ui}/components/InvokeTabs.tsx | 17 +- .../components/InvokeWorkarea.scss | 0 .../components/InvokeWorkarea.tsx | 36 +- .../components/TextToImage/TextToImage.scss | 0 .../TextToImage/TextToImageDisplay.tsx | 0 .../TextToImage/TextToImagePanel.tsx | 64 + .../components/TextToImage/index.tsx | 2 +- .../UnifiedCanvas/CanvasWorkarea.scss | 0 .../UnifiedCanvasDisplayBeta.tsx | 0 .../UnifiedCanvasBaseBrushSettings.tsx | 0 .../UnifiedCanvasBrushSettings.tsx | 0 .../UnifiedCanvasBrushSize.tsx | 0 .../UnifiedCanvasClearMask.tsx | 0 .../UnifiedCanvasColorPicker.tsx | 0 .../UnifiedCanvasDarkenOutsideSelection.tsx | 0 .../UnifiedCanvasEnableMask.tsx | 0 .../UnifiedCanvasLimitStrokesToBox.tsx | 0 .../UnifiedCanvasMaskBrushSettings.tsx | 0 .../UnifiedCanvasMoveSettings.tsx | 0 .../UnifiedCanvasPreserveMask.tsx | 0 
.../UnifiedCanvasSettings.tsx | 0 .../UnifiedCanvasShowGrid.tsx | 0 .../UnifiedCanvasSnapToGrid.tsx | 0 .../UnifiedCanvasToolSettingsBeta.tsx | 0 .../UnifiedCanvasCopyToClipboard.tsx | 0 .../UnifiedCanvasDownloadImage.tsx | 0 .../UnifiedCanvasFileUploader.tsx | 0 .../UnifiedCanvasLayerSelect.tsx | 0 .../UnifiedCanvasMergeVisible.tsx | 0 .../UnifiedCanvasMoveTool.tsx | 0 .../UnifiedCanvasProcessingButtons.tsx | 18 +- .../UnifiedCanvasResetCanvas.tsx | 0 .../UnifiedCanvasResetView.tsx | 0 .../UnifiedCanvasSaveToGallery.tsx | 0 .../UnifiedCanvasToolSelect.tsx | 0 .../UnifiedCanvasToolbarBeta.tsx | 6 +- .../UnifiedCanvas/UnifiedCanvasDisplay.tsx | 0 .../UnifiedCanvas/UnifiedCanvasPanel.tsx | 66 + .../UnifiedCanvas/UnifiedCanvasWorkarea.tsx | 4 +- .../src/features/{tabs => ui/store}/tabMap.ts | 0 .../src/features/ui/store/uiSelectors.ts | 35 + .../frontend/src/features/ui/store/uiSlice.ts | 89 + .../frontend/src/features/ui/store/uiTypes.ts | 15 + invokeai/frontend/src/i18.d.ts | 4 +- invokeai/frontend/src/i18n.ts | 2 +- .../src/styles/Mixins/_Responsive.scss | 6 +- invokeai/frontend/src/styles/index.scss | 35 +- invokeai/frontend/stats.html | 6177 +++++++++++++++++ 179 files changed, 7463 insertions(+), 1165 deletions(-) create mode 100644 invokeai/frontend/dist/assets/index-legacy-35973932.js rename invokeai/frontend/public/locales/{options => parameters}/de.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/en-US.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/en.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/es.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/fr.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/it.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/ja.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/nl.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/pl.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/pt.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/pt_br.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/ru.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/ua.json (100%) rename invokeai/frontend/public/locales/{options => parameters}/zh_cn.json (100%) rename invokeai/frontend/src/features/gallery/store/{gallerySliceSelectors.ts => gallerySelectors.ts} (70%) create mode 100644 invokeai/frontend/src/features/lightbox/store/lightboxSelectors.ts create mode 100644 invokeai/frontend/src/features/lightbox/store/lightboxSlice.ts delete mode 100644 invokeai/frontend/src/features/options/components/AdvancedOptions/Output/ImageToImageOutputOptions.tsx delete mode 100644 invokeai/frontend/src/features/options/components/AdvancedOptions/Output/OutputOptions.tsx delete mode 100644 invokeai/frontend/src/features/options/components/MainOptions/MainAdvancedOptionsCheckbox.tsx delete mode 100644 invokeai/frontend/src/features/options/store/optionsSelectors.ts rename invokeai/frontend/src/features/{options => parameters}/components/AccordionItems/AdvancedSettings.scss (89%) rename invokeai/frontend/src/features/{options => parameters}/components/AccordionItems/InvokeAccordionItem.tsx (83%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/BoundingBoxSettings => parameters/components/AdvancedParameters/Canvas/BoundingBox}/BoundingBoxSettings.scss 
(100%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/BoundingBoxSettings => parameters/components/AdvancedParameters/Canvas/BoundingBox}/BoundingBoxSettings.tsx (95%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/InfillAndScalingOptions.tsx => parameters/components/AdvancedParameters/Canvas/InfillAndScalingSettings.tsx} (87%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/SeamCorrectionOptions => parameters/components/AdvancedParameters/Canvas/SeamCorrection}/SeamBlur.tsx (76%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/SeamCorrectionOptions/SeamCorrectionOptions.tsx => parameters/components/AdvancedParameters/Canvas/SeamCorrection/SeamCorrectionSettings.tsx} (81%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/SeamCorrectionOptions => parameters/components/AdvancedParameters/Canvas/SeamCorrection}/SeamSize.tsx (77%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/SeamCorrectionOptions => parameters/components/AdvancedParameters/Canvas/SeamCorrection}/SeamSteps.tsx (81%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Canvas/SeamCorrectionOptions => parameters/components/AdvancedParameters/Canvas/SeamCorrection}/SeamStrength.tsx (80%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/FaceRestore/FaceRestoreOptions.tsx => parameters/components/AdvancedParameters/FaceRestore/FaceRestoreSettings.tsx} (66%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/FaceRestore/FaceRestoreToggle.tsx (82%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/ImageToImage/ImageFit.tsx (77%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/ImageToImage/ImageToImageStrength.tsx (83%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Output/HiresOptions.tsx => parameters/components/AdvancedParameters/Output/HiresSettings.tsx} (63%) create mode 100644 invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/ImageToImageOutputSettings.tsx create mode 100644 invokeai/frontend/src/features/parameters/components/AdvancedParameters/Output/OutputSettings.tsx rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Output/SeamlessOptions.tsx => parameters/components/AdvancedParameters/Output/SeamlessSettings.tsx} (72%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Seed/Perlin.tsx (75%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Seed/RandomizeSeed.tsx (78%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Seed/Seed.tsx (74%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Seed/SeedOptions.tsx => parameters/components/AdvancedParameters/Seed/SeedSettings.tsx} (91%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Seed/ShuffleSeed.tsx (81%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Seed/Threshold.tsx (79%) rename 
invokeai/frontend/src/features/{options/components/AdvancedOptions/Upscale/UpscaleOptions.scss => parameters/components/AdvancedParameters/Upscale/UpscaleSettings.scss} (78%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Upscale/UpscaleOptions.tsx => parameters/components/AdvancedParameters/Upscale/UpscaleSettings.tsx} (59%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Upscale/UpscaleToggle.tsx (82%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Variations/GenerateVariations.tsx (80%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Variations/SeedWeights.tsx (79%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions => parameters/components/AdvancedParameters}/Variations/VariationAmount.tsx (75%) rename invokeai/frontend/src/features/{options/components/AdvancedOptions/Variations/VariationsOptions.tsx => parameters/components/AdvancedParameters/Variations/VariationsSettings.tsx} (83%) rename invokeai/frontend/src/features/{options/components/MainOptions => parameters/components/MainParameters}/MainCFGScale.tsx (73%) rename invokeai/frontend/src/features/{options/components/MainOptions => parameters/components/MainParameters}/MainHeight.tsx (72%) rename invokeai/frontend/src/features/{options/components/MainOptions => parameters/components/MainParameters}/MainIterations.tsx (78%) rename invokeai/frontend/src/features/{options/components/MainOptions/MainOptions.scss => parameters/components/MainParameters/MainParameters.scss} (69%) rename invokeai/frontend/src/features/{options/components/MainOptions/MainOptions.tsx => parameters/components/MainParameters/MainParameters.tsx} (70%) rename invokeai/frontend/src/features/{options/components/MainOptions => parameters/components/MainParameters}/MainSampler.tsx (77%) rename invokeai/frontend/src/features/{options/components/MainOptions => parameters/components/MainParameters}/MainSteps.tsx (73%) rename invokeai/frontend/src/features/{options/components/MainOptions => parameters/components/MainParameters}/MainWidth.tsx (72%) rename invokeai/frontend/src/features/{options/components/OptionsAccordion.tsx => parameters/components/ParametersAccordion.tsx} (83%) rename invokeai/frontend/src/features/{options => parameters}/components/ProcessButtons/CancelButton.tsx (90%) rename invokeai/frontend/src/features/{options => parameters}/components/ProcessButtons/InvokeButton.tsx (89%) rename invokeai/frontend/src/features/{options => parameters}/components/ProcessButtons/Loopback.tsx (69%) rename invokeai/frontend/src/features/{options => parameters}/components/ProcessButtons/ProcessButtons.scss (100%) rename invokeai/frontend/src/features/{options => parameters}/components/ProcessButtons/ProcessButtons.tsx (86%) rename invokeai/frontend/src/features/{options => parameters}/components/PromptInput/NegativePromptInput.tsx (79%) rename invokeai/frontend/src/features/{options => parameters}/components/PromptInput/PromptInput.scss (100%) rename invokeai/frontend/src/features/{options => parameters}/components/PromptInput/PromptInput.tsx (84%) create mode 100644 invokeai/frontend/src/features/parameters/store/generationSelectors.ts rename invokeai/frontend/src/features/{options/store/optionsSlice.ts => parameters/store/generationSlice.ts} (62%) create mode 100644 
invokeai/frontend/src/features/parameters/store/postprocessingSelectors.ts create mode 100644 invokeai/frontend/src/features/parameters/store/postprocessingSlice.ts delete mode 100644 invokeai/frontend/src/features/tabs/components/ImageToImage/ImageToImagePanel.tsx delete mode 100644 invokeai/frontend/src/features/tabs/components/TextToImage/TextToImagePanel.tsx delete mode 100644 invokeai/frontend/src/features/tabs/components/UnifiedCanvas/UnifiedCanvasPanel.tsx rename invokeai/frontend/src/features/{tabs => ui}/components/FloatingButton.scss (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/FloatingGalleryButton.tsx (73%) rename invokeai/frontend/src/features/{tabs/components/FloatingOptionsPanelButtons.tsx => ui/components/FloatingParametersPanelButtons.tsx} (60%) rename invokeai/frontend/src/features/{tabs => ui}/components/ImageToImage/ImageToImage.scss (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/ImageToImage/ImageToImageDisplay.tsx (94%) create mode 100644 invokeai/frontend/src/features/ui/components/ImageToImage/ImageToImagePanel.tsx rename invokeai/frontend/src/features/{tabs => ui}/components/ImageToImage/InitImagePreview.tsx (86%) rename invokeai/frontend/src/features/{tabs => ui}/components/ImageToImage/InitialImageOverlay.tsx (89%) rename invokeai/frontend/src/features/{tabs => ui}/components/ImageToImage/index.tsx (81%) rename invokeai/frontend/src/features/{tabs/components/InvokeOptionsPanel.scss => ui/components/InvokeParametersPanel.scss} (84%) rename invokeai/frontend/src/features/{tabs/components/InvokeOptionsPanel.tsx => ui/components/InvokeParametersPanel.tsx} (63%) rename invokeai/frontend/src/features/{tabs => ui}/components/InvokeTabs.scss (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/InvokeTabs.tsx (91%) rename invokeai/frontend/src/features/{tabs => ui}/components/InvokeWorkarea.scss (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/InvokeWorkarea.tsx (74%) rename invokeai/frontend/src/features/{tabs => ui}/components/TextToImage/TextToImage.scss (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/TextToImage/TextToImageDisplay.tsx (100%) create mode 100644 invokeai/frontend/src/features/ui/components/TextToImage/TextToImagePanel.tsx rename invokeai/frontend/src/features/{tabs => ui}/components/TextToImage/index.tsx (80%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/CanvasWorkarea.scss (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasDisplayBeta.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBaseBrushSettings.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSettings.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasBrushSize.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasClearMask.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasColorPicker.tsx (100%) rename invokeai/frontend/src/features/{tabs => 
ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasDarkenOutsideSelection.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasEnableMask.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasLimitStrokesToBox.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMaskBrushSettings.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasMoveSettings.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasPreserveMask.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSettings.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasShowGrid.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettings/UnifiedCanvasSnapToGrid.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolSettingsBeta.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasCopyToClipboard.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasDownloadImage.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasFileUploader.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasLayerSelect.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMergeVisible.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasMoveTool.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasProcessingButtons.tsx (65%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetCanvas.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasResetView.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasSaveToGallery.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbar/UnifiedCanvasToolSelect.tsx (100%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasBeta/UnifiedCanvasToolbarBeta.tsx (92%) rename invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasDisplay.tsx (100%) create mode 100644 invokeai/frontend/src/features/ui/components/UnifiedCanvas/UnifiedCanvasPanel.tsx rename 
invokeai/frontend/src/features/{tabs => ui}/components/UnifiedCanvas/UnifiedCanvasWorkarea.tsx (83%) rename invokeai/frontend/src/features/{tabs => ui/store}/tabMap.ts (100%) create mode 100644 invokeai/frontend/src/features/ui/store/uiSelectors.ts create mode 100644 invokeai/frontend/src/features/ui/store/uiSlice.ts create mode 100644 invokeai/frontend/src/features/ui/store/uiTypes.ts create mode 100644 invokeai/frontend/stats.html diff --git a/invokeai/frontend/dist/assets/index-legacy-35973932.js b/invokeai/frontend/dist/assets/index-legacy-35973932.js new file mode 100644 index 0000000000..dbd3ceabf5 --- /dev/null +++ b/invokeai/frontend/dist/assets/index-legacy-35973932.js @@ -0,0 +1,52 @@ +!function(){function e(e,t,n){return(t=function(e){var t=function(e,t){if("object"!=typeof e||null===e)return e;var n=e[Symbol.toPrimitive];if(void 0!==n){var r=n.call(e,t||"default");if("object"!=typeof r)return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:String(t)}(t))in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}System.register([],(function(t,n){"use strict";var r=document.createElement("style");return r.textContent="@font-face{font-family:Inter;src:url("+new URL("Inter.b9a8e5e2.ttf",n.meta.url).href+");font-display:swap;font-weight:400;font-style:normal}@font-face{font-family:Inter;src:url("+new URL("Inter-Bold.790c108b.ttf",n.meta.url).href+');font-display:swap;font-weight:600;font-style:normal}@keyframes slideOut{0%{transform:translate(10rem)}to{transform:translate(0)}}@keyframes pulse{0%{transform:scale(1)}50%{transform:scale(1.1)}to{transform:scale(1)}}*{scrollbar-width:thick;scrollbar-color:var(--scrollbar-color) transparent}*::-webkit-scrollbar{width:8px;height:8px}*::-webkit-scrollbar-track{background:transparent}*::-webkit-scrollbar-thumb{background:var(--scrollbar-color);border-radius:8px;border:2px solid var(--scrollbar-color)}*::-webkit-scrollbar-thumb:hover{background:var(--scrollbar-color-hover);border:2px solid var(--scrollbar-color-hover)}::-webkit-scrollbar-button{background:transparent}[data-theme=dark]{--white: rgb(255, 255, 255);--accent-color-dim: rgb(57, 25, 153);--accent-color: rgb(80, 40, 200);--accent-color-bright: rgb(104, 60, 230);--accent-color-hover: var(--accent-color-bright);--root-bg-color: rgb(10, 10, 10);--background-color: rgb(26, 26, 32);--background-color-light: rgb(40, 44, 48);--background-color-secondary: rgb(16, 16, 22);--text-color: rgb(255, 255, 255);--text-color-secondary: rgb(160, 162, 188);--subtext-color: rgb(24, 24, 34);--subtext-color-bright: rgb(48, 48, 64);--border-color: rgb(30, 30, 46);--border-color-light: rgb(60, 60, 76);--svg-color: rgb(255, 255, 255);--invalid: rgb(255, 75, 75);--invalid-secondary: rgb(120, 5, 5);--destructive-color: rgb(185, 55, 55);--destructive-color-hover: rgb(255, 75, 75);--warning-color: rgb(200, 88, 40);--warning-color-hover: rgb(230, 117, 60);--border-color-invalid: rgb(255, 80, 50);--box-shadow-color-invalid: rgb(210, 30, 10);--tab-color: rgb(30, 32, 42);--tab-hover-color: rgb(46, 48, 58);--tab-panel-bg: rgb(36, 38, 48);--tab-list-bg: var(--accent-color);--tab-list-text: rgb(202, 204, 216);--tab-list-text-inactive: rgb(92, 94, 114);--btn-base-color: rgb(30, 32, 42);--btn-base-color-hover: rgb(46, 48, 68);--btn-load-more: rgb(30, 32, 42);--btn-load-more-hover: rgb(54, 56, 66);--btn-svg-color: rgb(255, 255, 255);--btn-delete-image: rgb(182, 46, 
[minified frontend dist bundle continues: CSS custom-property definitions for the UI themes (including [data-theme=light] and [data-theme=green]), the max-width:600px media-query layout, and component styles for the site header, tabs, prompt bar, gallery, lightbox, inpainting canvas, console, modals, and options panel, followed by the bundled production runtimes of React, the React scheduler, and react-dom with the react-dom MIT @license header]