Merge branch 'main' into bbox-ar

blessedcoolant 2023-07-13 13:45:08 +12:00 committed by GitHub
commit b7a4f3c2cb
56 changed files with 1207 additions and 723 deletions

View File

@ -1,25 +1,9 @@
# use this file as a whitelist
* *
!invokeai !invokeai
!ldm
!pyproject.toml !pyproject.toml
!docker/docker-entrypoint.sh
!LICENSE
# ignore frontend/web but whitelist dist **/node_modules
invokeai/frontend/web/ **/__pycache__
!invokeai/frontend/web/dist/ **/*.egg-info
# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets/
!invokeai/assets/web/
# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt
# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]
# Distribution / packaging
**/*.egg-info/
**/*.egg

View File

@ -3,17 +3,15 @@ on:
push: push:
branches: branches:
- 'main' - 'main'
- 'update/ci/docker/*'
- 'update/docker/*'
- 'dev/ci/docker/*'
- 'dev/docker/*'
paths: paths:
- 'pyproject.toml' - 'pyproject.toml'
- '.dockerignore' - '.dockerignore'
- 'invokeai/**' - 'invokeai/**'
- 'docker/Dockerfile' - 'docker/Dockerfile'
- 'docker/docker-entrypoint.sh'
- 'workflows/build-container.yml'
tags: tags:
- 'v*.*.*' - 'v*'
workflow_dispatch: workflow_dispatch:
permissions: permissions:
@ -26,23 +24,27 @@ jobs:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
flavor: gpu-driver:
- rocm - cuda
- cuda - cpu
- cpu - rocm
include:
- flavor: rocm
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
- flavor: cuda
pip-extra-index-url: ''
- flavor: cpu
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: ${{ matrix.flavor }} name: ${{ matrix.gpu-driver }}
env: env:
PLATFORMS: 'linux/amd64,linux/arm64' # torch/arm64 does not support GPU currently, so arm64 builds
DOCKERFILE: 'docker/Dockerfile' # would not be GPU-accelerated.
# re-enable arm64 if there is sufficient demand.
# PLATFORMS: 'linux/amd64,linux/arm64'
PLATFORMS: 'linux/amd64'
steps: steps:
- name: Free up more disk space on the runner
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
sudo swapoff /mnt/swapfile
sudo rm -rf /mnt/swapfile
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
@ -53,7 +55,7 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }} github-token: ${{ secrets.GITHUB_TOKEN }}
images: | images: |
ghcr.io/${{ github.repository }} ghcr.io/${{ github.repository }}
${{ vars.DOCKERHUB_REPOSITORY }} ${{ env.DOCKERHUB_REPOSITORY }}
tags: | tags: |
type=ref,event=branch type=ref,event=branch
type=ref,event=tag type=ref,event=tag
@ -62,8 +64,8 @@ jobs:
type=pep440,pattern={{major}} type=pep440,pattern={{major}}
type=sha,enable=true,prefix=sha-,format=short type=sha,enable=true,prefix=sha-,format=short
flavor: | flavor: |
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }} latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false suffix=-${{ matrix.gpu-driver }},onlatest=false
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v2 uses: docker/setup-qemu-action@v2
@ -81,34 +83,33 @@ jobs:
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub # - name: Login to Docker Hub
if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != '' # if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
uses: docker/login-action@v2 # uses: docker/login-action@v2
with: # with:
username: ${{ secrets.DOCKERHUB_USERNAME }} # username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} # password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build container - name: Build container
id: docker_build id: docker_build
uses: docker/build-push-action@v4 uses: docker/build-push-action@v4
with: with:
context: . context: .
file: ${{ env.DOCKERFILE }} file: docker/Dockerfile
platforms: ${{ env.PLATFORMS }} platforms: ${{ env.PLATFORMS }}
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }} push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
cache-from: | cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }} type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
type=gha,scope=main-${{ matrix.flavor }} type=gha,scope=main-${{ matrix.gpu-driver }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }} cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
- name: Docker Hub Description # - name: Docker Hub Description
if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != '' # if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
uses: peter-evans/dockerhub-description@v3 # uses: peter-evans/dockerhub-description@v3
with: # with:
username: ${{ secrets.DOCKERHUB_USERNAME }} # username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }} # password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: ${{ vars.DOCKERHUB_REPOSITORY }} # repository: ${{ vars.DOCKERHUB_REPOSITORY }}
short-description: ${{ github.event.repository.description }} # short-description: ${{ github.event.repository.description }}
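
For reference, a rough local equivalent of one matrix job is a single `docker buildx` invocation against `docker/Dockerfile` (a sketch only; the image tag and the chosen `GPU_DRIVER` value are illustrative, and the build must run from the repository root since the context is `.`):

```
# Approximate local equivalent of one CI matrix job.
# GPU_DRIVER is the build arg consumed by docker/Dockerfile (cuda | cpu | rocm).
docker buildx build \
  --platform linux/amd64 \
  --build-arg GPU_DRIVER=cuda \
  -f docker/Dockerfile \
  -t invokeai:local-cuda \
  .
```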

docker/.env.sample (new file, 13 lines)
View File

@ -0,0 +1,13 @@
## Make a copy of this file named `.env` and fill in the values below.
## Any environment variables supported by InvokeAI can be specified here.
# INVOKEAI_ROOT is a path on the local filesystem where InvokeAI will store data.
# Outputs will also be stored here by default.
# This **must** be an absolute path.
INVOKEAI_ROOT=
HUGGINGFACE_TOKEN=
## optional variables specific to the docker setup
# GPU_DRIVER=cuda
# CONTAINER_UID=1000

View File

@ -1,107 +1,129 @@
# syntax=docker/dockerfile:1 # syntax=docker/dockerfile:1.4
ARG PYTHON_VERSION=3.9 ## Builder stage
##################
## base image ##
##################
FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base
LABEL org.opencontainers.image.authors="mauwii@outlook.de" FROM library/ubuntu:22.04 AS builder
# Prepare apt for buildkit cache ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean \ RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt-get install -y \
git \
python3.10-venv \
python3-pip \
build-essential
# Install dependencies ENV INVOKEAI_SRC=/opt/invokeai
RUN \ ENV VIRTUAL_ENV=/opt/venv/invokeai
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.*
# Set working directory and env ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ARG APPDIR=/usr/src ARG TORCH_VERSION=2.0.1
ARG APPNAME=InvokeAI ARG TORCHVISION_VERSION=0.15.2
WORKDIR ${APPDIR} ARG GPU_DRIVER=cuda
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH ARG TARGETPLATFORM="linux/amd64"
# Keeps Python from generating .pyc files in the container # unused but available
ENV PYTHONDONTWRITEBYTECODE 1 ARG BUILDPLATFORM
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# Don't fall back to legacy build system
ENV PIP_USE_PEP517=1
####################### WORKDIR ${INVOKEAI_SRC}
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
# Install build dependencies # Install pytorch before all other pip packages
RUN \ # NOTE: there are no pytorch builds for arm64 + cuda, only cpu
--mount=type=cache,target=/var/cache/apt,sharing=locked \ # x86_64/CUDA is default
--mount=type=cache,target=/var/lib/apt,sharing=locked \ RUN --mount=type=cache,target=/root/.cache/pip \
apt-get update \ python3 -m venv ${VIRTUAL_ENV} &&\
&& apt-get install -y \ if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
--no-install-recommends \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
build-essential=12.9 \ elif [ "$GPU_DRIVER" = "rocm" ]; then \
gcc=4:10.2.* \ extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
python3-dev=3.9.* else \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \
fi &&\
pip install $extra_index_url_arg \
torch==$TORCH_VERSION \
torchvision==$TORCHVISION_VERSION
# Prepare pip for buildkit cache # Install the local package.
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip # Editable mode helps use the same image for development:
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR} # the local working copy can be bind-mounted into the image
RUN mkdir -p ${PIP_CACHE_DIR} # at path defined by ${INVOKEAI_SRC}
COPY invokeai ./invokeai
COPY pyproject.toml ./
RUN --mount=type=cache,target=/root/.cache/pip \
# xformers + triton fails to install on arm64
if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
pip install -e ".[xformers]"; \
else \
pip install -e "."; \
fi
# Create virtual environment # #### Build the Web UI ------------------------------------
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
python3 -m venv "${APPNAME}" \
--upgrade-deps
# Install requirements FROM node:18 AS web-builder
COPY --link pyproject.toml . WORKDIR /build
COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/ COPY invokeai/frontend/web/ ./
ARG PIP_EXTRA_INDEX_URL RUN --mount=type=cache,target=/usr/lib/node_modules \
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL} npm install --include dev
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \ RUN --mount=type=cache,target=/usr/lib/node_modules \
"${APPNAME}"/bin/pip install . yarn vite build
# Install pyproject.toml
COPY --link . .
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
"${APPNAME}/bin/pip" install .
# Build patchmatch #### Runtime stage ---------------------------------------
FROM library/ubuntu:22.04 AS runtime
ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
RUN apt update && apt install -y --no-install-recommends \
git \
curl \
vim \
tmux \
ncdu \
iotop \
bzip2 \
gosu \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
libopencv-dev \
libstdc++-10-dev &&\
apt-get clean && apt-get autoclean
# globally add magic-wormhole
# for ease of transferring data to and from the container
# when running in sandboxed cloud environments; e.g. Runpod etc.
RUN pip install magic-wormhole
ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
ENV INVOKEAI_ROOT=/invokeai
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
# --link requires buildkit w/ dockerfile syntax 1.4
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
WORKDIR ${INVOKEAI_SRC}
# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python3 -c "from patchmatch import patch_match" RUN python3 -c "from patchmatch import patch_match"
##################### # Create unprivileged user and make the local dir
## runtime image ## RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke
##################### RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT}
FROM python-base AS runtime
# Create a new user COPY docker/docker-entrypoint.sh ./
ARG UNAME=appuser ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
RUN useradd \ CMD ["invokeai-web", "--host", "0.0.0.0"]
--no-log-init \
-m \
-U \
"${UNAME}"
# Create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
&& chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"
# Setup runtime environment
USER ${UNAME}:${UNAME}
COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]

docker/README.md (new file, 77 lines)
View File

@ -0,0 +1,77 @@
# InvokeAI Containerized
All commands are to be run from the `docker` directory: `cd docker`
#### Linux
1. Ensure BuildKit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`); see the example sketch after this list.
2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-compose-on-ubuntu-22-04).
- The deprecated `docker-compose` (hyphenated) CLI continues to work for now.
3. Ensure docker daemon is able to access the GPU.
- You may need to install [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
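
A minimal sketch of the daemon settings referenced in step 1 (note this overwrites `/etc/docker/daemon.json`; merge by hand if the file already has other settings):

```
# Enable BuildKit daemon-wide, then restart the daemon.
sudo tee /etc/docker/daemon.json <<'EOF'
{
  "features": { "buildkit": true }
}
EOF
sudo systemctl restart docker
```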
#### macOS
1. Ensure Docker has at least 16GB RAM
2. Enable VirtioFS for file sharing
3. Enable `docker compose` V2 support
This is done via Docker Desktop preferences
## Quickstart
1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy .env.sample .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
a. the desired location of the InvokeAI runtime directory, or
b. an existing, v3.0.0 compatible runtime directory.
1. `docker compose up`
The image will be built automatically if needed.
The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. It will be populated with the base configs and models necessary to start generating.
### Use a GPU
- Linux is *recommended* for GPU support in Docker.
- WSL2 is *required* for Windows.
- Only the `x86_64` architecture is supported.
The Docker daemon on the system must already be set up to use the GPU. On Linux, this involves installing the `nvidia-container-toolkit` (see above) and configuring the `nvidia` runtime as the default. Steps will differ for AMD GPUs. Please see the Docker documentation for the most up-to-date instructions on using your GPU with Docker.
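
A quick sanity check that the daemon can reach an NVIDIA GPU (the CUDA image tag below is only illustrative):

```
# Should print the nvidia-smi table from inside a container.
docker run --rm --gpus all nvidia/cuda:11.8.0-base-ubuntu22.04 nvidia-smi
```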
## Customize
Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `docker compose up`, your custom values will be used.
You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.
Example (most values are optional):
```
INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
HUGGINGFACE_TOKEN=the_actual_token
CONTAINER_UID=1000
GPU_DRIVER=cuda
```
## Even Moar Customizing!
See the `docker-compose.yml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples are below.
### Reconfigure the runtime directory
This can be used to download additional models from the supported model list.
In conjunction with `INVOKEAI_ROOT`, it can also be used to initialize a runtime directory.
```
command:
- invokeai-configure
- --yes
```
Or install models:
```
command:
- invokeai-model-install
```
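As an alternative to editing the compose file, one-off commands can be run against the same service (a sketch, assuming the `invokeai` service name from `docker-compose.yml`):
```
docker compose run --rm invokeai invokeai-model-install --yes
```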

View File

@ -1,51 +1,11 @@
#!/usr/bin/env bash #!/usr/bin/env bash
set -e set -e
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable build_args=""
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
# Possible Values are:
# - cpu
# - cuda
# - rocm
# Don't forget to also set it when executing run.sh
# if it is not set, the script will try to detect the flavor by itself.
#
# Doc can be found here:
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}") [[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env)
cd "$SCRIPTDIR" || exit 1
source ./env.sh echo "docker-compose build args:"
echo $build_args
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile} docker-compose build $build_args
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
echo -n "creating docker volume "
docker volume create "${VOLUMENAME}"
fi
# Build Container
docker build \
--platform="${PLATFORM:-linux/amd64}" \
--tag="${CONTAINER_IMAGE:-invokeai}" \
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
--file="${DOCKERFILE}" \
..
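
Typical usage of the simplified script stays the same (a sketch; run from the `docker` directory, and build args are picked up from `.env` if present):

```
cd docker
cp .env.sample .env   # then fill in INVOKEAI_ROOT etc.
./build.sh
```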

docker/docker-compose.yml (new file, 48 lines)
View File

@ -0,0 +1,48 @@
# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr
version: '3.8'
services:
invokeai:
image: "local/invokeai:latest"
# edit below to run on a container runtime other than nvidia-container-runtime.
# not yet tested with rocm/AMD GPUs
# Comment out the "deploy" section to run on CPU only
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
build:
context: ..
dockerfile: docker/Dockerfile
# variables without a default will automatically inherit from the host environment
environment:
- INVOKEAI_ROOT
- HF_HOME
# Create a .env file in the same directory as this docker-compose.yml file
# and populate it with environment variables. See .env.sample
env_file:
- .env
ports:
- "${INVOKEAI_PORT:-9090}:9090"
volumes:
- ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
- ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
# - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
# - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
tty: true
stdin_open: true
# # Example of running alternative commands/scripts in the container
# command:
# - bash
# - -c
# - |
# invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
# invokeai-nodes-web --host 0.0.0.0

docker/docker-entrypoint.sh (new executable file, 65 lines)
View File

@ -0,0 +1,65 @@
#!/bin/bash
set -e -o pipefail
### Container entrypoint
# Runs the CMD as defined by the Dockerfile or passed to `docker run`
# Can be used to configure the runtime dir
# Bypass by using ENTRYPOINT or `--entrypoint`
### Set INVOKEAI_ROOT pointing to a valid runtime directory
# Otherwise configure the runtime dir first.
### Configure the InvokeAI runtime directory (done by default):
# docker run --rm -it <this image> --configure
# or skip with --no-configure
### Set the CONTAINER_UID envvar to match your user.
# Ensures files created in the container are owned by you:
# docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
# Default UID: 1000, chosen due to its popularity on Linux systems. Possibly 501 on macOS.
USER_ID=${CONTAINER_UID:-1000}
USER=invoke
usermod -u ${USER_ID} ${USER} 1>/dev/null
configure() {
# Configure the runtime directory
if [[ -f ${INVOKEAI_ROOT}/invokeai.yaml ]]; then
echo "${INVOKEAI_ROOT}/invokeai.yaml exists. InvokeAI is already configured."
echo "To reconfigure InvokeAI, delete the above file."
echo "======================================================================"
else
mkdir -p ${INVOKEAI_ROOT}
chown --recursive ${USER} ${INVOKEAI_ROOT}
gosu ${USER} invokeai-configure --yes --default_only
fi
}
## Skip attempting to configure.
## Must be passed first, before any other args.
if [[ $1 != "--no-configure" ]]; then
configure
else
shift
fi
### Set the $PUBLIC_KEY env var to enable SSH access.
# We do not install openssh-server in the image by default to avoid bloat,
# but it is useful to have the full SSH server, e.g. on Runpod
# (use SCP to copy files to/from the container, etc.).
if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
apt-get update
apt-get install -y openssh-server
pushd $HOME
mkdir -p .ssh
echo ${PUBLIC_KEY} > .ssh/authorized_keys
chmod -R 700 .ssh
popd
service ssh start
fi
cd ${INVOKEAI_ROOT}
# Run the CMD as the Container User (not root).
exec gosu ${USER} "$@"

View File

@ -1,54 +0,0 @@
#!/usr/bin/env bash
# This file is used to set environment variables for the build.sh and run.sh scripts.
# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
# Activate virtual environment if not already activated and exists
if [[ -z $VIRTUAL_ENV ]]; then
[[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
&& source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
&& echo "Activated virtual environment: $VIRTUAL_ENV"
fi
# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="cuda"
elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
CONTAINER_FLAVOR="cpu"
fi
fi
# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
# elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
# PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
fi
fi
# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
# enable docker buildkit
export DOCKER_BUILDKIT=1

View File

@ -1,41 +1,8 @@
#!/usr/bin/env bash #!/usr/bin/env bash
set -e set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}") SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1 cd "$SCRIPTDIR" || exit 1
source ./env.sh docker-compose up --build -d
docker-compose logs -f
# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs
echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
echo -e "local Models:\t${MODELSPATH:-unset}\n"
docker run \
--interactive \
--tty \
--rm \
--platform="${PLATFORM}" \
--name="${REPOSITORY_NAME}" \
--hostname="${REPOSITORY_NAME}" \
--mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
--mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${@:+$@}
echo -e "\nCleaning trash folder ..."
for f in outputs/.Trash*; do
if [ -e "$f" ]; then
rm -Rf "$f"
break
fi
done

docker/runpod-readme.md (new file, 60 lines)
View File

@ -0,0 +1,60 @@
# InvokeAI - A Stable Diffusion Toolkit
Stable Diffusion distribution by InvokeAI: https://github.com/invoke-ai
The Docker image tracks the `main` branch of the InvokeAI project, which means it includes the latest features, but may contain some bugs.
Your working directory is mounted under the `/workspace` path inside the pod. The models are in `/workspace/invokeai/models`, and outputs are in `/workspace/invokeai/outputs`.
> **Only the /workspace directory will persist between pod restarts!**
> **If you _terminate_ (not just _stop_) the pod, the /workspace will be lost.**
## Quickstart
1. Launch a pod from this template. **It will take about 5-10 minutes to run through the initial setup**. Be patient.
1. Wait for the application to load.
- TIP: you know it's ready when the CPU usage goes idle
- You can also check the logs for a line that says "_Point your browser at..._"
1. Open the Invoke AI web UI: click the `Connect` => `connect over HTTP` button.
1. Generate some art!
## Other things you can do
At any point you may edit the pod configuration and set an arbitrary Docker command. For example, you could run a command to download some models using `curl`, or fetch some images and place them into your outputs to continue a working session.
If you need to run *multiple commands*, define them in the Docker Command field like this:
`bash -c "cd ${INVOKEAI_ROOT}/outputs; wormhole receive 2-foo-bar; invoke.py --web --host 0.0.0.0"`
### Copying your data in and out of the pod
This image includes a couple of handy tools for moving data into the pod (such as your custom models or embeddings) and out of it (such as downloading your outputs). Here are your options:
- **SSH server**:
1. Make sure to create and set your Public Key in the RunPod settings (follow the official instructions)
1. Add an exposed port 22 (TCP) in the pod settings!
1. When your pod restarts, you will see a new entry in the `Connect` dialog. Use this SSH server to `scp` or `sftp` your files as necessary, or SSH into the pod using the fully fledged SSH server.
- [**Magic Wormhole**](https://magic-wormhole.readthedocs.io/en/latest/welcome.html):
1. On your computer, `pip install magic-wormhole` (see above instructions for details)
1. Connect to the command line **using the "light" SSH client** or the browser-based console. _Currently there's a bug where `wormhole` isn't available when connected to "full" SSH server, as described above_.
1. `wormhole send /workspace/invokeai/outputs` will send the entire `outputs` directory. You can also send individual files.
1. Once packaged, you will see a `wormhole receive <123-some-words>` command. Copy it
1. Paste this command into the terminal on your local machine to securely download the payload.
1. It works the same in reverse: you can `wormhole send` some models from your computer to the pod. Again, save your files somewhere in `/workspace` or they will be lost when the pod is stopped.
- **RunPod's Cloud Sync feature** may be used to sync the persistent volume to cloud storage. You could, for example, copy the entire `/workspace` to S3, add some custom models to it, and copy it back from S3 when launching new pod configurations. Follow the Cloud Sync instructions.
### Disable the NSFW checker
The NSFW checker is enabled by default. To disable it, edit the pod configuration and set the following command:
```
invoke --web --host 0.0.0.0 --no-nsfw_checker
```
---
Template ©2023 Eugene Brodsky [ebr](https://github.com/ebr)

View File

@ -248,6 +248,7 @@ class InvokeAiInstance:
"install", "install",
"--require-virtualenv", "--require-virtualenv",
"torch~=2.0.0", "torch~=2.0.0",
"torchmetrics==0.11.4",
"torchvision>=0.14.1", "torchvision>=0.14.1",
"--force-reinstall", "--force-reinstall",
"--find-links" if find_links is not None else None, "--find-links" if find_links is not None else None,

View File

@ -20,7 +20,7 @@ echo 9. Update InvokeAI
echo 10. Command-line help echo 10. Command-line help
echo Q - Quit echo Q - Quit
set /P choice="Please enter 1-10, Q: [2] " set /P choice="Please enter 1-10, Q: [2] "
if not defined choice set choice=2 if not defined choice set choice=1
IF /I "%choice%" == "1" ( IF /I "%choice%" == "1" (
echo Starting the InvokeAI browser-based UI.. echo Starting the InvokeAI browser-based UI..
python .venv\Scripts\invokeai-web.exe %* python .venv\Scripts\invokeai-web.exe %*
@ -56,7 +56,7 @@ IF /I "%choice%" == "1" (
call cmd /k call cmd /k
) ELSE IF /I "%choice%" == "9" ( ) ELSE IF /I "%choice%" == "9" (
echo Running invokeai-update... echo Running invokeai-update...
python .venv\Scripts\invokeai-update.exe %* python -m invokeai.frontend.install.invokeai_update
) ELSE IF /I "%choice%" == "10" ( ) ELSE IF /I "%choice%" == "10" (
echo Displaying command line help... echo Displaying command line help...
python .venv\Scripts\invokeai.exe --help %* python .venv\Scripts\invokeai.exe --help %*

View File

@ -93,7 +93,7 @@ do_choice() {
9) 9)
clear clear
printf "Update InvokeAI\n" printf "Update InvokeAI\n"
invokeai-update python -m invokeai.frontend.install.invokeai_update
;; ;;
10) 10)
clear clear

View File

@ -17,6 +17,7 @@ from invokeai.app.services.metadata import CoreMetadataService
from invokeai.app.services.resource_name import SimpleNameService from invokeai.app.services.resource_name import SimpleNameService
from invokeai.app.services.urls import LocalUrlService from invokeai.app.services.urls import LocalUrlService
from invokeai.backend.util.logging import InvokeAILogger from invokeai.backend.util.logging import InvokeAILogger
from invokeai.version.invokeai_version import __version__
from ..services.default_graphs import create_system_graphs from ..services.default_graphs import create_system_graphs
from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@ -58,7 +59,8 @@ class ApiDependencies:
@staticmethod @staticmethod
def initialize(config, event_handler_id: int, logger: Logger = logger): def initialize(config, event_handler_id: int, logger: Logger = logger):
logger.info(f"Internet connectivity is {config.internet_available}") logger.debug(f'InvokeAI version {__version__}')
logger.debug(f"Internet connectivity is {config.internet_available}")
events = FastAPIEventService(event_handler_id) events = FastAPIEventService(event_handler_id)

View File

@ -1,5 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) # Copyright (c) 2022-2023 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
import asyncio import asyncio
import sys
from inspect import signature from inspect import signature
import uvicorn import uvicorn
@ -20,6 +21,13 @@ from ..backend.util.logging import InvokeAILogger
app_config = InvokeAIAppConfig.get_config() app_config = InvokeAIAppConfig.get_config()
app_config.parse_args() app_config.parse_args()
logger = InvokeAILogger.getLogger(config=app_config) logger = InvokeAILogger.getLogger(config=app_config)
from invokeai.version.invokeai_version import __version__
# we call this early so that the message appears before
# other invokeai initialization messages
if app_config.version:
print(f'InvokeAI version {__version__}')
sys.exit(0)
import invokeai.frontend.web as web_dir import invokeai.frontend.web as web_dir
import mimetypes import mimetypes
@ -29,6 +37,7 @@ from .api.routers import sessions, models, images, boards, board_images, app_inf
from .api.sockets import SocketIO from .api.sockets import SocketIO
from .invocations.baseinvocation import BaseInvocation from .invocations.baseinvocation import BaseInvocation
import torch import torch
if torch.backends.mps.is_available(): if torch.backends.mps.is_available():
import invokeai.backend.util.mps_fixes import invokeai.backend.util.mps_fixes

View File

@ -16,6 +16,12 @@ from invokeai.backend.util.logging import InvokeAILogger
config = InvokeAIAppConfig.get_config() config = InvokeAIAppConfig.get_config()
config.parse_args() config.parse_args()
logger = InvokeAILogger().getLogger(config=config) logger = InvokeAILogger().getLogger(config=config)
from invokeai.version.invokeai_version import __version__
# we call this early so that the message appears before other invokeai initialization messages
if config.version:
print(f'InvokeAI version {__version__}')
sys.exit(0)
from invokeai.app.services.board_image_record_storage import ( from invokeai.app.services.board_image_record_storage import (
SqliteBoardImageRecordStorage, SqliteBoardImageRecordStorage,
@ -208,6 +214,7 @@ def invoke_all(context: CliContext):
raise SessionError() raise SessionError()
def invoke_cli(): def invoke_cli():
logger.info(f'InvokeAI version {__version__}')
# get the optional list of invocations to execute on the command line # get the optional list of invocations to execute on the command line
parser = config.get_parser() parser = config.get_parser()
parser.add_argument('commands',nargs='*') parser.add_argument('commands',nargs='*')

View File

@ -23,7 +23,8 @@ InvokeAI:
xformers_enabled: false xformers_enabled: false
sequential_guidance: false sequential_guidance: false
precision: float16 precision: float16
max_loaded_models: 4 max_cache_size: 6
max_vram_cache_size: 2.7
always_use_cpu: false always_use_cpu: false
free_gpu_mem: false free_gpu_mem: false
Features: Features:
@ -168,7 +169,7 @@ from argparse import ArgumentParser
from omegaconf import OmegaConf, DictConfig from omegaconf import OmegaConf, DictConfig
from pathlib import Path from pathlib import Path
from pydantic import BaseSettings, Field, parse_obj_as from pydantic import BaseSettings, Field, parse_obj_as
from typing import ClassVar, Dict, List, Literal, Union, get_origin, get_type_hints, get_args from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args
INIT_FILE = Path('invokeai.yaml') INIT_FILE = Path('invokeai.yaml')
MODEL_CORE = Path('models/core') MODEL_CORE = Path('models/core')
@ -270,7 +271,8 @@ class InvokeAISettings(BaseSettings):
@classmethod @classmethod
def _excluded(self)->List[str]: def _excluded(self)->List[str]:
return ['type','initconf'] # combination of deprecated parameters and internal ones
return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version']
class Config: class Config:
env_file_encoding = 'utf-8' env_file_encoding = 'utf-8'
@ -363,8 +365,10 @@ setting environment variables INVOKEAI_<setting>.
always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance') always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance') free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
max_loaded_models : int = Field(default=3, gt=0, description="(DEPRECATED: use max_cache_size) Maximum number of models to keep in memory for rapid switching", category='Memory/Performance') max_loaded_models : int = Field(default=3, gt=0, description="(DEPRECATED: use max_cache_size) Maximum number of models to keep in memory for rapid switching", category='DEPRECATED')
max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance') max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
max_vram_cache_size : float = Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
gpu_mem_reserved : float = Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16',description='Floating point precision', category='Memory/Performance') precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16',description='Floating point precision', category='Memory/Performance')
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance') sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance') xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
@ -389,6 +393,8 @@ setting environment variables INVOKEAI_<setting>.
# note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
log_format : Literal[tuple(['plain','color','syslog','legacy'])] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging") log_format : Literal[tuple(['plain','color','syslog','legacy'])] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging")
log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging") log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging")
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
#fmt: on #fmt: on
def parse_args(self, argv: List[str]=None, conf: DictConfig = None, clobber=False): def parse_args(self, argv: List[str]=None, conf: DictConfig = None, clobber=False):
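With the new `version` field, the installed version can be printed without starting the server (a sketch, assuming the setting is exposed as a `--version` flag like the other config fields):
```
invokeai-web --version
```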

View File

@ -258,8 +258,6 @@ class ModelManagerService(ModelManagerServiceBase):
config_file = config.model_conf_path config_file = config.model_conf_path
else: else:
config_file = config.root_dir / "configs/models.yaml" config_file = config.root_dir / "configs/models.yaml"
if not config_file.exists():
raise IOError(f"The file {config_file} could not be found.")
logger.debug(f'config file={config_file}') logger.debug(f'config file={config_file}')

View File

@ -104,6 +104,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
except Exception as e: except Exception as e:
error = traceback.format_exc() error = traceback.format_exc()
logger.error(error)
# Save error # Save error
graph_execution_state.set_node_error(invocation.id, error) graph_execution_state.set_node_error(invocation.id, error)

View File

@ -36,6 +36,9 @@ from .models import BaseModelType, ModelType, SubModelType, ModelBase
# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
DEFAULT_MAX_CACHE_SIZE = 6.0 DEFAULT_MAX_CACHE_SIZE = 6.0
# amount of GPU memory to hold in reserve for use by generations (GB)
DEFAULT_MAX_VRAM_CACHE_SIZE= 2.75
# actual size of a gig # actual size of a gig
GIG = 1073741824 GIG = 1073741824
@ -82,6 +85,7 @@ class ModelCache(object):
def __init__( def __init__(
self, self,
max_cache_size: float=DEFAULT_MAX_CACHE_SIZE, max_cache_size: float=DEFAULT_MAX_CACHE_SIZE,
max_vram_cache_size: float=DEFAULT_MAX_VRAM_CACHE_SIZE,
execution_device: torch.device=torch.device('cuda'), execution_device: torch.device=torch.device('cuda'),
storage_device: torch.device=torch.device('cpu'), storage_device: torch.device=torch.device('cpu'),
precision: torch.dtype=torch.float16, precision: torch.dtype=torch.float16,
@ -99,12 +103,11 @@ class ModelCache(object):
:param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially :param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially
:param sha_chunksize: Chunksize to use when calculating sha256 model hash :param sha_chunksize: Chunksize to use when calculating sha256 model hash
''' '''
#max_cache_size = 9999
self.model_infos: Dict[str, ModelBase] = dict() self.model_infos: Dict[str, ModelBase] = dict()
self.lazy_offloading = lazy_offloading self.lazy_offloading = lazy_offloading
#self.sequential_offload: bool=sequential_offload
self.precision: torch.dtype=precision self.precision: torch.dtype=precision
self.max_cache_size: int=max_cache_size self.max_cache_size: float=max_cache_size
self.max_vram_cache_size: float=max_vram_cache_size
self.execution_device: torch.device=execution_device self.execution_device: torch.device=execution_device
self.storage_device: torch.device=storage_device self.storage_device: torch.device=storage_device
self.sha_chunksize=sha_chunksize self.sha_chunksize=sha_chunksize
@ -201,14 +204,22 @@ class ModelCache(object):
self._cache_stack.remove(key) self._cache_stack.remove(key)
self._cache_stack.append(key) self._cache_stack.append(key)
return self.ModelLocker(self, key, cache_entry.model, gpu_load) return self.ModelLocker(self, key, cache_entry.model, gpu_load, cache_entry.size)
class ModelLocker(object): class ModelLocker(object):
def __init__(self, cache, key, model, gpu_load): def __init__(self, cache, key, model, gpu_load, size_needed):
'''
:param cache: The model_cache object
:param key: The key of the model to lock in GPU
:param model: The model to lock
:param gpu_load: True if load into gpu
:param size_needed: Size of the model to load
'''
self.gpu_load = gpu_load self.gpu_load = gpu_load
self.cache = cache self.cache = cache
self.key = key self.key = key
self.model = model self.model = model
self.size_needed = size_needed
self.cache_entry = self.cache._cached_models[self.key] self.cache_entry = self.cache._cached_models[self.key]
def __enter__(self) -> Any: def __enter__(self) -> Any:
@ -222,7 +233,7 @@ class ModelCache(object):
try: try:
if self.cache.lazy_offloading: if self.cache.lazy_offloading:
self.cache._offload_unlocked_models() self.cache._offload_unlocked_models(self.size_needed)
if self.model.device != self.cache.execution_device: if self.model.device != self.cache.execution_device:
self.cache.logger.debug(f'Moving {self.key} into {self.cache.execution_device}') self.cache.logger.debug(f'Moving {self.key} into {self.cache.execution_device}')
@ -337,14 +348,20 @@ class ModelCache(object):
self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}") self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}")
def _offload_unlocked_models(self, size_needed: int=0):
def _offload_unlocked_models(self): reserved = self.max_vram_cache_size * GIG
for model_key, cache_entry in self._cached_models.items(): vram_in_use = torch.cuda.memory_allocated()
self.logger.debug(f'{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB')
for model_key, cache_entry in sorted(self._cached_models.items(), key=lambda x:x[1].size):
if vram_in_use <= reserved:
break
if not cache_entry.locked and cache_entry.loaded: if not cache_entry.locked and cache_entry.loaded:
self.logger.debug(f'Offloading {model_key} from {self.execution_device} into {self.storage_device}') self.logger.debug(f'Offloading {model_key} from {self.execution_device} into {self.storage_device}')
with VRAMUsage() as mem: with VRAMUsage() as mem:
cache_entry.model.to(self.storage_device) cache_entry.model.to(self.storage_device)
self.logger.debug(f'GPU VRAM freed: {(mem.vram_used/GIG):.2f} GB') self.logger.debug(f'GPU VRAM freed: {(mem.vram_used/GIG):.2f} GB')
vram_in_use += mem.vram_used # note vram_used is negative
self.logger.debug(f'{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB')
def _local_model_hash(self, model_path: Union[str, Path]) -> str: def _local_model_hash(self, model_path: Union[str, Path]) -> str:
sha = hashlib.sha256() sha = hashlib.sha256()

View File

@ -231,6 +231,7 @@ from __future__ import annotations
import os import os
import hashlib import hashlib
import textwrap import textwrap
import yaml
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from pathlib import Path
from typing import Optional, List, Tuple, Union, Dict, Set, Callable, types from typing import Optional, List, Tuple, Union, Dict, Set, Callable, types
@ -249,8 +250,8 @@ from .model_cache import ModelCache, ModelLocker
from .models import ( from .models import (
BaseModelType, ModelType, SubModelType, BaseModelType, ModelType, SubModelType,
ModelError, SchedulerPredictionType, MODEL_CLASSES, ModelError, SchedulerPredictionType, MODEL_CLASSES,
ModelConfigBase, ModelNotFoundException, ModelConfigBase, ModelNotFoundException, InvalidModelException,
) )
# We are only starting to number the config file with release 3. # We are only starting to number the config file with release 3.
# The config file version doesn't have to start at release version, but it will help # The config file version doesn't have to start at release version, but it will help
@ -274,10 +275,6 @@ class ModelInfo():
def __exit__(self,*args, **kwargs): def __exit__(self,*args, **kwargs):
self.context.__exit__(*args, **kwargs) self.context.__exit__(*args, **kwargs)
class InvalidModelError(Exception):
"Raised when an invalid model is requested"
pass
class AddModelResult(BaseModel): class AddModelResult(BaseModel):
name: str = Field(description="The name of the model after installation") name: str = Field(description="The name of the model after installation")
model_type: ModelType = Field(description="The type of model") model_type: ModelType = Field(description="The type of model")
@ -314,6 +311,9 @@ class ModelManager(object):
self.config_path = None self.config_path = None
if isinstance(config, (str, Path)): if isinstance(config, (str, Path)):
self.config_path = Path(config) self.config_path = Path(config)
if not self.config_path.exists():
logger.warning(f'The file {self.config_path} was not found. Initializing a new file')
self.initialize_model_config(self.config_path)
config = OmegaConf.load(self.config_path) config = OmegaConf.load(self.config_path)
elif not isinstance(config, DictConfig): elif not isinstance(config, DictConfig):
@ -336,6 +336,7 @@ class ModelManager(object):
self.logger = logger self.logger = logger
self.cache = ModelCache( self.cache = ModelCache(
max_cache_size=max_cache_size, max_cache_size=max_cache_size,
max_vram_cache_size = self.app_config.max_vram_cache_size,
execution_device = device_type, execution_device = device_type,
precision = precision, precision = precision,
sequential_offload = sequential_offload, sequential_offload = sequential_offload,
@ -386,6 +387,16 @@ class ModelManager(object):
def _get_model_cache_path(self, model_path): def _get_model_cache_path(self, model_path):
return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest() return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest()
@classmethod
def initialize_model_config(cls, config_path: Path):
"""Create empty config file"""
with open(config_path,'w') as yaml_file:
yaml_file.write(yaml.dump({'__metadata__':
{'version':'3.0.0'}
}
)
)
def get_model( def get_model(
self, self,
model_name: str, model_name: str,
@ -802,6 +813,8 @@ class ModelManager(object):
model_config: ModelConfigBase = model_class.probe_config(str(model_path)) model_config: ModelConfigBase = model_class.probe_config(str(model_path))
self.models[model_key] = model_config self.models[model_key] = model_config
new_models_found = True new_models_found = True
except InvalidModelException:
self.logger.warning(f"Not a valid model: {model_path}")
except NotImplementedError as e: except NotImplementedError as e:
self.logger.warning(e) self.logger.warning(e)
@ -853,16 +866,22 @@ class ModelManager(object):
scanned_dirs.add(path) scanned_dirs.add(path)
continue continue
if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}]): if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}]):
new_models_found.update(installer.heuristic_import(path)) try:
scanned_dirs.add(path) new_models_found.update(installer.heuristic_import(path))
scanned_dirs.add(path)
except ValueError as e:
self.logger.warning(str(e))
for f in files: for f in files:
path = Path(root) / f path = Path(root) / f
if path in known_paths or path.parent in scanned_dirs: if path in known_paths or path.parent in scanned_dirs:
continue continue
if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}: if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}:
import_result = installer.heuristic_import(path) try:
new_models_found.update(import_result) import_result = installer.heuristic_import(path)
new_models_found.update(import_result)
except ValueError as e:
self.logger.warning(str(e))
self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models') self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models')
installed.update(new_models_found) installed.update(new_models_found)

View File

@ -59,7 +59,7 @@ class ModelProbe(object):
elif isinstance(model,(dict,ModelMixin,ConfigMixin)): elif isinstance(model,(dict,ModelMixin,ConfigMixin)):
return cls.probe(model_path=None, model=model, prediction_type_helper=prediction_type_helper) return cls.probe(model_path=None, model=model, prediction_type_helper=prediction_type_helper)
else: else:
raise Exception("model parameter {model} is neither a Path, nor a model") raise ValueError("model parameter {model} is neither a Path, nor a model")
@classmethod @classmethod
def probe(cls, def probe(cls,
@ -237,7 +237,7 @@ class CheckpointProbeBase(ProbeBase):
elif in_channels == 4: elif in_channels == 4:
return ModelVariantType.Normal return ModelVariantType.Normal
else: else:
raise Exception("Cannot determine variant type") raise ValueError(f"Cannot determine variant type (in_channels={in_channels}) at {self.checkpoint_path}")
class PipelineCheckpointProbe(CheckpointProbeBase): class PipelineCheckpointProbe(CheckpointProbeBase):
def get_base_type(self)->BaseModelType: def get_base_type(self)->BaseModelType:
@ -248,7 +248,7 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
return BaseModelType.StableDiffusion1 return BaseModelType.StableDiffusion1
if key_name in state_dict and state_dict[key_name].shape[-1] == 1024: if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
return BaseModelType.StableDiffusion2 return BaseModelType.StableDiffusion2
raise Exception("Cannot determine base type") raise ValueError("Cannot determine base type")
def get_scheduler_prediction_type(self)->SchedulerPredictionType: def get_scheduler_prediction_type(self)->SchedulerPredictionType:
type = self.get_base_type() type = self.get_base_type()
@ -329,7 +329,7 @@ class ControlNetCheckpointProbe(CheckpointProbeBase):
return BaseModelType.StableDiffusion2 return BaseModelType.StableDiffusion2
elif self.checkpoint_path and self.helper: elif self.checkpoint_path and self.helper:
return self.helper(self.checkpoint_path) return self.helper(self.checkpoint_path)
raise Exception("Unable to determine base type for {self.checkpoint_path}") raise ValueError("Unable to determine base type for {self.checkpoint_path}")
######################################################## ########################################################
# classes for probing folders # classes for probing folders
@ -418,7 +418,7 @@ class ControlNetFolderProbe(FolderProbeBase):
def get_base_type(self)->BaseModelType: def get_base_type(self)->BaseModelType:
config_file = self.folder_path / 'config.json' config_file = self.folder_path / 'config.json'
if not config_file.exists(): if not config_file.exists():
raise Exception(f"Cannot determine base type for {self.folder_path}") raise ValueError(f"Cannot determine base type for {self.folder_path}")
with open(config_file,'r') as file: with open(config_file,'r') as file:
config = json.load(file) config = json.load(file)
# no obvious way to distinguish between sd2-base and sd2-768 # no obvious way to distinguish between sd2-base and sd2-768
@ -435,7 +435,7 @@ class LoRAFolderProbe(FolderProbeBase):
model_file = base_file model_file = base_file
break break
if not model_file: if not model_file:
raise Exception('Unknown LoRA format encountered') raise ValueError('Unknown LoRA format encountered')
return LoRACheckpointProbe(model_file,None).get_base_type() return LoRACheckpointProbe(model_file,None).get_base_type()
############## register probe classes ###### ############## register probe classes ######

View File

@ -2,7 +2,7 @@ import inspect
from enum import Enum from enum import Enum
from pydantic import BaseModel from pydantic import BaseModel
from typing import Literal, get_origin from typing import Literal, get_origin
from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException, InvalidModelException
from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
from .vae import VaeModel from .vae import VaeModel
from .lora import LoRAModel from .lora import LoRAModel

View File

@ -15,6 +15,9 @@ from contextlib import suppress
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
class InvalidModelException(Exception):
pass
class ModelNotFoundException(Exception): class ModelNotFoundException(Exception):
pass pass

View File

@ -13,6 +13,7 @@ from .base import (
calc_model_size_by_fs, calc_model_size_by_fs,
calc_model_size_by_data, calc_model_size_by_data,
classproperty, classproperty,
InvalidModelException,
) )
class ControlNetModelFormat(str, Enum): class ControlNetModelFormat(str, Enum):
@ -73,10 +74,18 @@ class ControlNetModel(ModelBase):
@classmethod @classmethod
def detect_format(cls, path: str): def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path): if os.path.isdir(path):
return ControlNetModelFormat.Diffusers if os.path.exists(os.path.join(path, "config.json")):
else: return ControlNetModelFormat.Diffusers
return ControlNetModelFormat.Checkpoint
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
return ControlNetModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod @classmethod
def convert_if_required( def convert_if_required(

View File

@ -9,6 +9,7 @@ from .base import (
ModelType, ModelType,
SubModelType, SubModelType,
classproperty, classproperty,
InvalidModelException,
) )
# TODO: naming # TODO: naming
from ..lora import LoRAModel as LoRAModelRaw from ..lora import LoRAModel as LoRAModelRaw
@ -56,10 +57,18 @@ class LoRAModel(ModelBase):
@classmethod @classmethod
def detect_format(cls, path: str): def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path): if os.path.isdir(path):
return LoRAModelFormat.Diffusers if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
else: return LoRAModelFormat.Diffusers
return LoRAModelFormat.LyCORIS
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return LoRAModelFormat.LyCORIS
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod @classmethod
def convert_if_required( def convert_if_required(
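The LoRA, Stable Diffusion, textual-inversion and VAE classes below repeat the same three-way check with only the marker file and extension list changing. If that duplication ever becomes a burden, the extension test itself collapses to a one-liner because str.endswith accepts a tuple — a possible follow-up refactor, not something this commit does:

    # Possible shared helper; illustrative only.
    def has_model_extension(path, extensions=(".safetensors", ".ckpt", ".pt")):
        """Equivalent to any(path.endswith(f".{ext}") for ext in ...) in the hunks above."""
        return path.endswith(extensions)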

View File

@ -16,6 +16,7 @@ from .base import (
SilenceWarnings, SilenceWarnings,
read_checkpoint_meta, read_checkpoint_meta,
classproperty, classproperty,
InvalidModelException,
) )
from invokeai.app.services.config import InvokeAIAppConfig from invokeai.app.services.config import InvokeAIAppConfig
from omegaconf import OmegaConf from omegaconf import OmegaConf
@ -98,10 +99,18 @@ class StableDiffusion1Model(DiffusersModel):
@classmethod @classmethod
def detect_format(cls, model_path: str): def detect_format(cls, model_path: str):
if not os.path.exists(model_path):
raise ModelNotFoundException()
if os.path.isdir(model_path): if os.path.isdir(model_path):
return StableDiffusion1ModelFormat.Diffusers if os.path.exists(os.path.join(model_path, "model_index.json")):
else: return StableDiffusion1ModelFormat.Diffusers
return StableDiffusion1ModelFormat.Checkpoint
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion1ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@classmethod @classmethod
def convert_if_required( def convert_if_required(
@ -200,10 +209,18 @@ class StableDiffusion2Model(DiffusersModel):
@classmethod @classmethod
def detect_format(cls, model_path: str): def detect_format(cls, model_path: str):
if not os.path.exists(model_path):
raise ModelNotFoundException()
if os.path.isdir(model_path): if os.path.isdir(model_path):
return StableDiffusion2ModelFormat.Diffusers if os.path.exists(os.path.join(model_path, "model_index.json")):
else: return StableDiffusion2ModelFormat.Diffusers
return StableDiffusion2ModelFormat.Checkpoint
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion2ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@classmethod @classmethod
def convert_if_required( def convert_if_required(
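Note that the marker file differs by model kind: a full Stable Diffusion pipeline in diffusers layout is recognized by a top-level model_index.json, whereas single-component models such as ControlNet (above) and VAE (below) look for config.json. A small sketch that makes the distinction explicit; classify_diffusers_folder is illustrative and not part of the commit:

    # Illustrative sketch of the two diffusers folder layouts used for detection.
    import os

    def classify_diffusers_folder(path: str) -> str:
        if os.path.exists(os.path.join(path, "model_index.json")):
            return "pipeline"          # full SD1/SD2 pipeline folder
        if os.path.exists(os.path.join(path, "config.json")):
            return "single component"  # e.g. a bare VAE or ControlNet folder
        return "not a diffusers folder"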

View File

@ -9,6 +9,7 @@ from .base import (
SubModelType, SubModelType,
classproperty, classproperty,
ModelNotFoundException, ModelNotFoundException,
InvalidModelException,
) )
# TODO: naming # TODO: naming
from ..lora import TextualInversionModel as TextualInversionModelRaw from ..lora import TextualInversionModel as TextualInversionModelRaw
@ -59,7 +60,18 @@ class TextualInversionModel(ModelBase):
@classmethod @classmethod
def detect_format(cls, path: str): def detect_format(cls, path: str):
return None if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
if os.path.exists(os.path.join(path, "learned_embeds.bin")):
return None # diffusers-ti
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return None
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod @classmethod
def convert_if_required( def convert_if_required(

View File

@ -15,6 +15,7 @@ from .base import (
calc_model_size_by_fs, calc_model_size_by_fs,
calc_model_size_by_data, calc_model_size_by_data,
classproperty, classproperty,
InvalidModelException,
) )
from invokeai.app.services.config import InvokeAIAppConfig from invokeai.app.services.config import InvokeAIAppConfig
from diffusers.utils import is_safetensors_available from diffusers.utils import is_safetensors_available
@ -75,10 +76,18 @@ class VaeModel(ModelBase):
@classmethod @classmethod
def detect_format(cls, path: str): def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path): if os.path.isdir(path):
return VaeModelFormat.Diffusers if os.path.exists(os.path.join(path, "config.json")):
else: return VaeModelFormat.Diffusers
return VaeModelFormat.Checkpoint
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return VaeModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod @classmethod
def convert_if_required( def convert_if_required(

View File

@ -773,7 +773,7 @@ def main():
config.parse_args(invoke_args) config.parse_args(invoke_args)
logger = InvokeAILogger().getLogger(config=config) logger = InvokeAILogger().getLogger(config=config)
if not (config.root_dir / config.conf_path.parent).exists(): if not (config.conf_path / 'models.yaml').exists():
logger.info( logger.info(
"Your InvokeAI root directory is not set up. Calling invokeai-configure." "Your InvokeAI root directory is not set up. Calling invokeai-configure."
) )
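The CLI now decides whether to run invokeai-configure by looking for models.yaml under the configuration directory rather than for the directory itself. A hedged sketch of that first-run check; the attribute name conf_path comes from the hunk, the helper itself is illustrative and assumes conf_path points at the configs directory:

    # Sketch of the first-run check above.
    from pathlib import Path

    def needs_first_run_configuration(conf_path: Path) -> bool:
        """True when models.yaml does not exist yet, i.e. the root was never initialized."""
        return not (conf_path / "models.yaml").exists()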

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -12,7 +12,7 @@
margin: 0; margin: 0;
} }
</style> </style>
<script type="module" crossorigin src="./assets/index-581af3d4.js"></script> <script type="module" crossorigin src="./assets/index-078526aa.js"></script>
</head> </head>
<body dir="ltr"> <body dir="ltr">

View File

@ -53,7 +53,7 @@
"linear": "Linear", "linear": "Linear",
"nodes": "Node Editor", "nodes": "Node Editor",
"batch": "Batch Manager", "batch": "Batch Manager",
"modelmanager": "Model Manager", "modelManager": "Model Manager",
"postprocessing": "Post Processing", "postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.", "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing", "postProcessing": "Post Processing",
@ -527,7 +527,9 @@
"showOptionsPanel": "Show Options Panel", "showOptionsPanel": "Show Options Panel",
"hidePreview": "Hide Preview", "hidePreview": "Hide Preview",
"showPreview": "Show Preview", "showPreview": "Show Preview",
"controlNetControlMode": "Control Mode" "controlNetControlMode": "Control Mode",
"clipSkip": "Clip Skip",
"aspectRatio": "Ratio"
}, },
"settings": { "settings": {
"models": "Models", "models": "Models",
@ -551,7 +553,8 @@
"generation": "Generation", "generation": "Generation",
"ui": "User Interface", "ui": "User Interface",
"favoriteSchedulers": "Favorite Schedulers", "favoriteSchedulers": "Favorite Schedulers",
"favoriteSchedulersPlaceholder": "No schedulers favorited" "favoriteSchedulersPlaceholder": "No schedulers favorited",
"showAdvancedOptions": "Show Advanced Options"
}, },
"toast": { "toast": {
"serverError": "Server Error", "serverError": "Server Error",
@ -669,6 +672,7 @@
}, },
"ui": { "ui": {
"showProgressImages": "Show Progress Images", "showProgressImages": "Show Progress Images",
"hideProgressImages": "Hide Progress Images" "hideProgressImages": "Hide Progress Images",
"swapSizes": "Swap Sizes"
} }
} }

View File

@ -53,7 +53,7 @@
"linear": "Linear", "linear": "Linear",
"nodes": "Node Editor", "nodes": "Node Editor",
"batch": "Batch Manager", "batch": "Batch Manager",
"modelmanager": "Model Manager", "modelManager": "Model Manager",
"postprocessing": "Post Processing", "postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.", "nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing", "postProcessing": "Post Processing",
@ -593,7 +593,10 @@
"metadataLoadFailed": "Failed to load metadata", "metadataLoadFailed": "Failed to load metadata",
"initialImageSet": "Initial Image Set", "initialImageSet": "Initial Image Set",
"initialImageNotSet": "Initial Image Not Set", "initialImageNotSet": "Initial Image Not Set",
"initialImageNotSetDesc": "Could not load initial image" "initialImageNotSetDesc": "Could not load initial image",
"nodesSaved": "Nodes Saved",
"nodesLoaded": "Nodes Loaded",
"nodesLoadedFailed": "Failed To Load Nodes"
}, },
"tooltip": { "tooltip": {
"feature": { "feature": {
@ -674,5 +677,10 @@
"showProgressImages": "Show Progress Images", "showProgressImages": "Show Progress Images",
"hideProgressImages": "Hide Progress Images", "hideProgressImages": "Hide Progress Images",
"swapSizes": "Swap Sizes" "swapSizes": "Swap Sizes"
},
"nodes": {
"reloadSchema": "Reload Schema",
"saveNodes": "Save Nodes",
"loadNodes": "Load Nodes"
} }
} }

View File

@ -102,6 +102,8 @@ export type AppFeature =
export type SDFeature = export type SDFeature =
| 'controlNet' | 'controlNet'
| 'noise' | 'noise'
| 'perlinNoise'
| 'noiseThreshold'
| 'variation' | 'variation'
| 'symmetry' | 'symmetry'
| 'seamless' | 'seamless'

View File

@ -53,13 +53,15 @@ const GalleryImage = (props: HoverableImageProps) => {
const handleClick = useCallback( const handleClick = useCallback(
(e: MouseEvent<HTMLDivElement>) => { (e: MouseEvent<HTMLDivElement>) => {
if (e.shiftKey) { // multiselect disabled for now
dispatch(imageRangeEndSelected(props.imageDTO.image_name)); // if (e.shiftKey) {
} else if (e.ctrlKey || e.metaKey) { // dispatch(imageRangeEndSelected(props.imageDTO.image_name));
dispatch(imageSelectionToggled(props.imageDTO.image_name)); // } else if (e.ctrlKey || e.metaKey) {
} else { // dispatch(imageSelectionToggled(props.imageDTO.image_name));
dispatch(imageSelected(props.imageDTO.image_name)); // } else {
} // dispatch(imageSelected(props.imageDTO.image_name));
// }
dispatch(imageSelected(props.imageDTO.image_name));
}, },
[dispatch, props.imageDTO.image_name] [dispatch, props.imageDTO.image_name]
); );
@ -121,6 +123,7 @@ const GalleryImage = (props: HoverableImageProps) => {
// withResetIcon // removed bc it's too easy to accidentally delete images // withResetIcon // removed bc it's too easy to accidentally delete images
isDropDisabled={true} isDropDisabled={true}
isUploadDisabled={true} isUploadDisabled={true}
thumbnail={true}
/> />
</Box> </Box>
)} )}

View File

@ -7,6 +7,7 @@ import {
OnConnectEnd, OnConnectEnd,
OnConnectStart, OnConnectStart,
OnEdgesChange, OnEdgesChange,
OnInit,
OnNodesChange, OnNodesChange,
ReactFlow, ReactFlow,
} from 'reactflow'; } from 'reactflow';
@ -16,6 +17,7 @@ import {
connectionStarted, connectionStarted,
edgesChanged, edgesChanged,
nodesChanged, nodesChanged,
setEditorInstance,
} from '../store/nodesSlice'; } from '../store/nodesSlice';
import { InvocationComponent } from './InvocationComponent'; import { InvocationComponent } from './InvocationComponent';
import ProgressImageNode from './ProgressImageNode'; import ProgressImageNode from './ProgressImageNode';
@ -67,6 +69,14 @@ export const Flow = () => {
dispatch(connectionEnded()); dispatch(connectionEnded());
}, [dispatch]); }, [dispatch]);
const onInit: OnInit = useCallback(
(v) => {
dispatch(setEditorInstance(v));
if (v) v.fitView();
},
[dispatch]
);
return ( return (
<ReactFlow <ReactFlow
nodeTypes={nodeTypes} nodeTypes={nodeTypes}
@ -77,6 +87,7 @@ export const Flow = () => {
onConnectStart={onConnectStart} onConnectStart={onConnectStart}
onConnect={onConnect} onConnect={onConnect}
onConnectEnd={onConnectEnd} onConnectEnd={onConnectEnd}
onInit={onInit}
defaultEdgeOptions={{ defaultEdgeOptions={{
style: { strokeWidth: 2 }, style: { strokeWidth: 2 },
}} }}

View File

@ -1,25 +1,21 @@
import { HStack } from '@chakra-ui/react'; import { HStack } from '@chakra-ui/react';
import { useAppDispatch } from 'app/store/storeHooks';
import IAIButton from 'common/components/IAIButton';
import { memo, useCallback } from 'react';
import { Panel } from 'reactflow';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import NodeInvokeButton from '../ui/NodeInvokeButton';
import CancelButton from 'features/parameters/components/ProcessButtons/CancelButton'; import CancelButton from 'features/parameters/components/ProcessButtons/CancelButton';
import { memo } from 'react';
import { Panel } from 'reactflow';
import LoadNodesButton from '../ui/LoadNodesButton';
import NodeInvokeButton from '../ui/NodeInvokeButton';
import ReloadSchemaButton from '../ui/ReloadSchemaButton';
import SaveNodesButton from '../ui/SaveNodesButton';
const TopCenterPanel = () => { const TopCenterPanel = () => {
const dispatch = useAppDispatch();
const handleReloadSchema = useCallback(() => {
dispatch(receivedOpenAPISchema());
}, [dispatch]);
return ( return (
<Panel position="top-center"> <Panel position="top-center">
<HStack> <HStack>
<NodeInvokeButton /> <NodeInvokeButton />
<CancelButton /> <CancelButton />
<IAIButton onClick={handleReloadSchema}>Reload Schema</IAIButton> <ReloadSchemaButton />
<SaveNodesButton />
<LoadNodesButton />
</HStack> </HStack>
</Panel> </Panel>
); );

View File

@ -0,0 +1,79 @@
import { FileButton } from '@mantine/core';
import { makeToast } from 'app/components/Toaster';
import { useAppDispatch } from 'app/store/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import { loadFileEdges, loadFileNodes } from 'features/nodes/store/nodesSlice';
import { addToast } from 'features/system/store/systemSlice';
import { memo, useCallback, useRef } from 'react';
import { useTranslation } from 'react-i18next';
import { FaUpload } from 'react-icons/fa';
import { useReactFlow } from 'reactflow';
const LoadNodesButton = () => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const { fitView } = useReactFlow();
const uploadedFileRef = useRef<() => void>(null);
const restoreJSONToEditor = useCallback(
(v: File | null) => {
if (!v) return;
const reader = new FileReader();
reader.onload = async () => {
const json = reader.result;
const retrievedNodeTree = await JSON.parse(String(json));
if (!retrievedNodeTree) {
dispatch(
addToast(
makeToast({
title: t('toast.nodesLoadedFailed'),
status: 'error',
})
)
);
}
if (retrievedNodeTree) {
dispatch(loadFileNodes(retrievedNodeTree.nodes));
dispatch(loadFileEdges(retrievedNodeTree.edges));
fitView();
dispatch(
addToast(
makeToast({ title: t('toast.nodesLoaded'), status: 'success' })
)
);
}
// Cleanup
reader.abort();
};
reader.readAsText(v);
// Cleanup
uploadedFileRef.current?.();
},
[fitView, dispatch, t]
);
return (
<FileButton
resetRef={uploadedFileRef}
accept="application/json"
onChange={restoreJSONToEditor}
>
{(props) => (
<IAIIconButton
icon={<FaUpload />}
tooltip={t('nodes.loadNodes')}
aria-label={t('nodes.loadNodes')}
{...props}
/>
)}
</FileButton>
);
};
export default memo(LoadNodesButton);

View File

@ -0,0 +1,24 @@
import { useAppDispatch } from 'app/store/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { FaSyncAlt } from 'react-icons/fa';
import { receivedOpenAPISchema } from 'services/api/thunks/schema';
export default function ReloadSchemaButton() {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const handleReloadSchema = useCallback(() => {
dispatch(receivedOpenAPISchema());
}, [dispatch]);
return (
<IAIIconButton
icon={<FaSyncAlt />}
tooltip={t('nodes.reloadSchema')}
aria-label={t('nodes.reloadSchema')}
onClick={handleReloadSchema}
/>
);
}

View File

@ -0,0 +1,45 @@
import { RootState } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';
import IAIIconButton from 'common/components/IAIIconButton';
import { map, omit } from 'lodash-es';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { FaSave } from 'react-icons/fa';
const SaveNodesButton = () => {
const { t } = useTranslation();
const editorInstance = useAppSelector(
(state: RootState) => state.nodes.editorInstance
);
const saveEditorToJSON = useCallback(() => {
if (editorInstance) {
const editorState = editorInstance.toObject();
editorState.edges = map(editorState.edges, (edge) => {
return omit(edge, ['style']);
});
const nodeSetupJSON = new Blob([JSON.stringify(editorState)]);
const nodeDownloadElement = document.createElement('a');
nodeDownloadElement.href = URL.createObjectURL(nodeSetupJSON);
nodeDownloadElement.download = 'MyNodes.json';
document.body.appendChild(nodeDownloadElement);
nodeDownloadElement.click();
// Cleanup
nodeDownloadElement.remove();
}
}, [editorInstance]);
return (
<IAIIconButton
icon={<FaSave />}
fontSize={18}
tooltip={t('nodes.saveNodes')}
aria-label={t('nodes.saveNodes')}
onClick={saveEditorToJSON}
/>
);
};
export default memo(SaveNodesButton);

View File

@ -13,6 +13,7 @@ import {
Node, Node,
NodeChange, NodeChange,
OnConnectStartParams, OnConnectStartParams,
ReactFlowInstance,
} from 'reactflow'; } from 'reactflow';
import { receivedOpenAPISchema } from 'services/api/thunks/schema'; import { receivedOpenAPISchema } from 'services/api/thunks/schema';
import { ImageField } from 'services/api/types'; import { ImageField } from 'services/api/types';
@ -25,6 +26,7 @@ export type NodesState = {
invocationTemplates: Record<string, InvocationTemplate>; invocationTemplates: Record<string, InvocationTemplate>;
connectionStartParams: OnConnectStartParams | null; connectionStartParams: OnConnectStartParams | null;
shouldShowGraphOverlay: boolean; shouldShowGraphOverlay: boolean;
editorInstance: ReactFlowInstance | undefined;
}; };
export const initialNodesState: NodesState = { export const initialNodesState: NodesState = {
@ -34,6 +36,7 @@ export const initialNodesState: NodesState = {
invocationTemplates: {}, invocationTemplates: {},
connectionStartParams: null, connectionStartParams: null,
shouldShowGraphOverlay: false, shouldShowGraphOverlay: false,
editorInstance: undefined,
}; };
const nodesSlice = createSlice({ const nodesSlice = createSlice({
@ -121,6 +124,15 @@ const nodesSlice = createSlice({
nodeEditorReset: () => { nodeEditorReset: () => {
return { ...initialNodesState }; return { ...initialNodesState };
}, },
setEditorInstance: (state, action) => {
state.editorInstance = action.payload;
},
loadFileNodes: (state, action: PayloadAction<Node<InvocationValue>[]>) => {
state.nodes = action.payload;
},
loadFileEdges: (state, action: PayloadAction<Edge[]>) => {
state.edges = action.payload;
},
}, },
extraReducers: (builder) => { extraReducers: (builder) => {
builder.addCase(receivedOpenAPISchema.fulfilled, (state, action) => { builder.addCase(receivedOpenAPISchema.fulfilled, (state, action) => {
@ -141,6 +153,9 @@ export const {
nodeTemplatesBuilt, nodeTemplatesBuilt,
nodeEditorReset, nodeEditorReset,
imageCollectionFieldValueChanged, imageCollectionFieldValueChanged,
setEditorInstance,
loadFileNodes,
loadFileEdges,
} = nodesSlice.actions; } = nodesSlice.actions;
export default nodesSlice.reducer; export default nodesSlice.reducer;

View File

@ -3,6 +3,7 @@ import { RootState } from 'app/store/store';
import { useAppDispatch, useAppSelector } from 'app/store/storeHooks'; import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
import IAIButton from 'common/components/IAIButton'; import IAIButton from 'common/components/IAIButton';
import { setAspectRatio } from 'features/ui/store/uiSlice'; import { setAspectRatio } from 'features/ui/store/uiSlice';
import { activeTabNameSelector } from '../../../../ui/store/uiSelectors';
const aspectRatios = [ const aspectRatios = [
{ name: 'Free', value: null }, { name: 'Free', value: null },
@ -17,6 +18,10 @@ export default function ParamAspectRatio() {
); );
const dispatch = useAppDispatch(); const dispatch = useAppDispatch();
const shouldFitToWidthHeight = useAppSelector(
(state: RootState) => state.generation.shouldFitToWidthHeight
);
const activeTabName = useAppSelector(activeTabNameSelector);
return ( return (
<Flex gap={2} flexGrow={1}> <Flex gap={2} flexGrow={1}>
@ -26,6 +31,9 @@ export default function ParamAspectRatio() {
key={ratio.name} key={ratio.name}
size="sm" size="sm"
isChecked={aspectRatio === ratio.value} isChecked={aspectRatio === ratio.value}
isDisabled={
activeTabName === 'img2img' ? !shouldFitToWidthHeight : false
}
onClick={() => dispatch(setAspectRatio(ratio.value))} onClick={() => dispatch(setAspectRatio(ratio.value))}
> >
{ratio.name} {ratio.name}

View File

@ -8,6 +8,7 @@ import { MdOutlineSwapVert } from 'react-icons/md';
import ParamAspectRatio from './ParamAspectRatio'; import ParamAspectRatio from './ParamAspectRatio';
import ParamHeight from './ParamHeight'; import ParamHeight from './ParamHeight';
import ParamWidth from './ParamWidth'; import ParamWidth from './ParamWidth';
import { activeTabNameSelector } from '../../../../ui/store/uiSelectors';
export default function ParamSize() { export default function ParamSize() {
const { t } = useTranslation(); const { t } = useTranslation();
@ -15,6 +16,7 @@ export default function ParamSize() {
const shouldFitToWidthHeight = useAppSelector( const shouldFitToWidthHeight = useAppSelector(
(state: RootState) => state.generation.shouldFitToWidthHeight (state: RootState) => state.generation.shouldFitToWidthHeight
); );
const activeTabName = useAppSelector(activeTabNameSelector);
return ( return (
<Flex <Flex
sx={{ sx={{
@ -50,13 +52,24 @@ export default function ParamSize() {
size="sm" size="sm"
icon={<MdOutlineSwapVert />} icon={<MdOutlineSwapVert />}
fontSize={20} fontSize={20}
isDisabled={
activeTabName === 'img2img' ? !shouldFitToWidthHeight : false
}
onClick={() => dispatch(toggleSize())} onClick={() => dispatch(toggleSize())}
/> />
</Flex> </Flex>
<Flex gap={2} alignItems="center"> <Flex gap={2} alignItems="center">
<Flex gap={2} flexDirection="column" width="full"> <Flex gap={2} flexDirection="column" width="full">
<ParamWidth isDisabled={!shouldFitToWidthHeight} /> <ParamWidth
<ParamHeight isDisabled={!shouldFitToWidthHeight} /> isDisabled={
activeTabName === 'img2img' ? !shouldFitToWidthHeight : false
}
/>
<ParamHeight
isDisabled={
activeTabName === 'img2img' ? !shouldFitToWidthHeight : false
}
/>
</Flex> </Flex>
</Flex> </Flex>
</Flex> </Flex>

View File

@ -27,6 +27,9 @@ const ParamNoiseCollapse = () => {
const { t } = useTranslation(); const { t } = useTranslation();
const isNoiseEnabled = useFeatureStatus('noise').isFeatureEnabled; const isNoiseEnabled = useFeatureStatus('noise').isFeatureEnabled;
const isPerlinNoiseEnabled = useFeatureStatus('perlinNoise').isFeatureEnabled;
const isNoiseThresholdEnabled =
useFeatureStatus('noiseThreshold').isFeatureEnabled;
const { activeLabel } = useAppSelector(selector); const { activeLabel } = useAppSelector(selector);
@ -42,8 +45,8 @@ const ParamNoiseCollapse = () => {
<Flex sx={{ gap: 2, flexDirection: 'column' }}> <Flex sx={{ gap: 2, flexDirection: 'column' }}>
<ParamNoiseToggle /> <ParamNoiseToggle />
<ParamCpuNoiseToggle /> <ParamCpuNoiseToggle />
<ParamPerlinNoise /> {isPerlinNoiseEnabled && <ParamPerlinNoise />}
<ParamNoiseThreshold /> {isNoiseThresholdEnabled && <ParamNoiseThreshold />}
</Flex> </Flex>
</IAICollapse> </IAICollapse>
); );

View File

@ -6,8 +6,15 @@ import { merge } from 'lodash-es';
export const initialConfigState: AppConfig = { export const initialConfigState: AppConfig = {
shouldUpdateImagesOnConnect: false, shouldUpdateImagesOnConnect: false,
disabledTabs: [], disabledTabs: [],
disabledFeatures: [], disabledFeatures: ['lightbox', 'faceRestore'],
disabledSDFeatures: [], disabledSDFeatures: [
'variation',
'seamless',
'symmetry',
'hires',
'perlinNoise',
'noiseThreshold',
],
canRestoreDeletedImagesFromBin: true, canRestoreDeletedImagesFromBin: true,
sd: { sd: {
disabledControlNetModels: [], disabledControlNetModels: [],

View File

@ -38,6 +38,7 @@ import NodesTab from './tabs/Nodes/NodesTab';
import ResizeHandle from './tabs/ResizeHandle'; import ResizeHandle from './tabs/ResizeHandle';
import TextToImageTab from './tabs/TextToImage/TextToImageTab'; import TextToImageTab from './tabs/TextToImage/TextToImageTab';
import UnifiedCanvasTab from './tabs/UnifiedCanvas/UnifiedCanvasTab'; import UnifiedCanvasTab from './tabs/UnifiedCanvas/UnifiedCanvasTab';
import { useFeatureStatus } from '../../system/hooks/useFeatureStatus';
export interface InvokeTabInfo { export interface InvokeTabInfo {
id: InvokeTabName; id: InvokeTabName;
@ -107,6 +108,7 @@ const InvokeTabs = () => {
const isLightBoxOpen = useAppSelector( const isLightBoxOpen = useAppSelector(
(state: RootState) => state.lightbox.isLightboxOpen (state: RootState) => state.lightbox.isLightboxOpen
); );
const isLightboxEnabled = useFeatureStatus('lightbox').isFeatureEnabled;
const { shouldPinGallery, shouldPinParametersPanel, shouldShowGallery } = const { shouldPinGallery, shouldPinParametersPanel, shouldShowGallery } =
useAppSelector((state: RootState) => state.ui); useAppSelector((state: RootState) => state.ui);
@ -119,7 +121,9 @@ const InvokeTabs = () => {
useHotkeys( useHotkeys(
'z', 'z',
() => { () => {
dispatch(setIsLightboxOpen(!isLightBoxOpen)); if (isLightboxEnabled) {
dispatch(setIsLightboxOpen(!isLightBoxOpen));
}
}, },
[isLightBoxOpen] [isLightBoxOpen]
); );

View File

@ -1 +1 @@
__version__ = "3.0.0+b1" __version__ = "3.0.0+b5"

View File

@ -15,7 +15,7 @@ InvokeAI:
Features: Features:
nsfw_checker: False nsfw_checker: False
Memory/Performance: Memory/Performance:
max_loaded_models: 5 max_cache_size: 5
''' '''
) )
@ -25,7 +25,7 @@ InvokeAI:
Features: Features:
nsfw_checker: true nsfw_checker: true
Memory/Performance: Memory/Performance:
max_loaded_models: 2 max_cache_size: 2
''' '''
) )
@ -36,46 +36,46 @@ def test_use_init():
conf1 = InvokeAIAppConfig.get_config() conf1 = InvokeAIAppConfig.get_config()
assert conf1 assert conf1
conf1.parse_args(conf=init1,argv=[]) conf1.parse_args(conf=init1,argv=[])
assert conf1.max_loaded_models==5 assert conf1.max_cache_size==5
assert not conf1.nsfw_checker assert not conf1.nsfw_checker
conf2 = InvokeAIAppConfig.get_config() conf2 = InvokeAIAppConfig.get_config()
assert conf2 assert conf2
conf2.parse_args(conf=init2,argv=[]) conf2.parse_args(conf=init2,argv=[])
assert conf2.nsfw_checker assert conf2.nsfw_checker
assert conf2.max_loaded_models==2 assert conf2.max_cache_size==2
assert not hasattr(conf2,'invalid_attribute') assert not hasattr(conf2,'invalid_attribute')
def test_argv_override(): def test_argv_override():
conf = InvokeAIAppConfig.get_config() conf = InvokeAIAppConfig.get_config()
conf.parse_args(conf=init1,argv=['--nsfw_checker','--max_loaded=10']) conf.parse_args(conf=init1,argv=['--nsfw_checker','--max_cache=10'])
assert conf.nsfw_checker assert conf.nsfw_checker
assert conf.max_loaded_models==10 assert conf.max_cache_size==10
assert conf.outdir==Path('outputs') # this is the default assert conf.outdir==Path('outputs') # this is the default
def test_env_override(): def test_env_override():
# argv overrides # argv overrides
conf = InvokeAIAppConfig() conf = InvokeAIAppConfig()
conf.parse_args(conf=init1,argv=['--max_loaded=10']) conf.parse_args(conf=init1,argv=['--max_cache=10'])
assert conf.nsfw_checker==False assert conf.nsfw_checker==False
os.environ['INVOKEAI_nsfw_checker'] = 'True' os.environ['INVOKEAI_nsfw_checker'] = 'True'
conf.parse_args(conf=init1,argv=['--max_loaded=10']) conf.parse_args(conf=init1,argv=['--max_cache=10'])
assert conf.nsfw_checker==True assert conf.nsfw_checker==True
# environment variables should be case insensitive # environment variables should be case insensitive
os.environ['InvokeAI_Max_Loaded_Models'] = '15' os.environ['InvokeAI_Max_Cache_Size'] = '15'
conf = InvokeAIAppConfig() conf = InvokeAIAppConfig()
conf.parse_args(conf=init1,argv=[]) conf.parse_args(conf=init1,argv=[])
assert conf.max_loaded_models == 15 assert conf.max_cache_size == 15
conf = InvokeAIAppConfig() conf = InvokeAIAppConfig()
conf.parse_args(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10']) conf.parse_args(conf=init1,argv=['--no-nsfw_checker','--max_cache=10'])
assert conf.nsfw_checker==False assert conf.nsfw_checker==False
assert conf.max_loaded_models==10 assert conf.max_cache_size==10
conf = InvokeAIAppConfig.get_config(max_loaded_models=20) conf = InvokeAIAppConfig.get_config(max_cache_size=20)
conf.parse_args(conf=init1,argv=[]) conf.parse_args(conf=init1,argv=[])
assert conf.max_loaded_models==20 assert conf.max_cache_size==20
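test_env_override above also pins down that environment overrides are case-insensitive and INVOKEAI_-prefixed (INVOKEAI_nsfw_checker, InvokeAI_Max_Cache_Size). A minimal sketch of that lookup rule, not the real InvokeAIAppConfig machinery; the naive type coercion is only meant for the integer case shown:

    # Minimal sketch of case-insensitive INVOKEAI_* environment overrides.
    import os

    def env_override(field_name, default):
        wanted = f"invokeai_{field_name}".lower()
        for key, value in os.environ.items():
            if key.lower() == wanted:
                return type(default)(value)  # naive coercion; fine for ints
        return default

    os.environ["InvokeAI_Max_Cache_Size"] = "15"
    assert env_override("max_cache_size", 5) == 15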
def test_type_coercion(): def test_type_coercion():
conf = InvokeAIAppConfig().get_config() conf = InvokeAIAppConfig().get_config()