invoke-ai/InvokeAI (https://github.com/invoke-ai/InvokeAI)
Merge branch 'main' into save-load-nodes
Commit 2cfe67bf1f
.dockerignore
@@ -1,25 +1,9 @@
-# use this file as a whitelist
 *
 !invokeai
-!ldm
 !pyproject.toml
+!docker/docker-entrypoint.sh
+!LICENSE
 
-# ignore frontend/web but whitelist dist
-invokeai/frontend/web/
-!invokeai/frontend/web/dist/
-
-# ignore invokeai/assets but whitelist invokeai/assets/web
-invokeai/assets/
-!invokeai/assets/web/
-
-# Guard against pulling in any models that might exist in the directory tree
-**/*.pt*
-**/*.ckpt
-
-# Byte-compiled / optimized / DLL files
-**/__pycache__/
-**/*.py[cod]
-
-# Distribution / packaging
-**/*.egg-info/
-**/*.egg
+**/node_modules
+**/__pycache__
+**/*.egg-info
.github/workflows/build-container.yml (vendored, 79 lines changed)
@@ -3,17 +3,15 @@ on:
   push:
     branches:
       - 'main'
-      - 'update/ci/docker/*'
-      - 'update/docker/*'
-      - 'dev/ci/docker/*'
-      - 'dev/docker/*'
     paths:
       - 'pyproject.toml'
       - '.dockerignore'
       - 'invokeai/**'
       - 'docker/Dockerfile'
+      - 'docker/docker-entrypoint.sh'
+      - 'workflows/build-container.yml'
     tags:
-      - 'v*.*.*'
+      - 'v*'
   workflow_dispatch:
 
 permissions:
@@ -26,23 +24,27 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        flavor:
-          - rocm
+        gpu-driver:
           - cuda
           - cpu
-        include:
-          - flavor: rocm
-            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-          - flavor: cuda
-            pip-extra-index-url: ''
-          - flavor: cpu
-            pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
+          - rocm
     runs-on: ubuntu-latest
-    name: ${{ matrix.flavor }}
+    name: ${{ matrix.gpu-driver }}
     env:
-      PLATFORMS: 'linux/amd64,linux/arm64'
-      DOCKERFILE: 'docker/Dockerfile'
+      # torch/arm64 does not support GPU currently, so arm64 builds
+      # would not be GPU-accelerated.
+      # re-enable arm64 if there is sufficient demand.
+      # PLATFORMS: 'linux/amd64,linux/arm64'
+      PLATFORMS: 'linux/amd64'
     steps:
+      - name: Free up more disk space on the runner
+        # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+          sudo swapoff /mnt/swapfile
+          sudo rm -rf /mnt/swapfile
+
       - name: Checkout
         uses: actions/checkout@v3
 
@@ -53,7 +55,7 @@ jobs:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           images: |
             ghcr.io/${{ github.repository }}
-            ${{ vars.DOCKERHUB_REPOSITORY }}
+            ${{ env.DOCKERHUB_REPOSITORY }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
@@ -62,8 +64,8 @@ jobs:
             type=pep440,pattern={{major}}
             type=sha,enable=true,prefix=sha-,format=short
           flavor: |
-            latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
-            suffix=-${{ matrix.flavor }},onlatest=false
+            latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
+            suffix=-${{ matrix.gpu-driver }},onlatest=false
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
@@ -81,34 +83,33 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Login to Docker Hub
-        if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      # - name: Login to Docker Hub
+      #   if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
+      #   uses: docker/login-action@v2
+      #   with:
+      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
+      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Build container
         id: docker_build
         uses: docker/build-push-action@v4
         with:
           context: .
-          file: ${{ env.DOCKERFILE }}
+          file: docker/Dockerfile
           platforms: ${{ env.PLATFORMS }}
           push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
           cache-from: |
-            type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
-            type=gha,scope=main-${{ matrix.flavor }}
-          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
+            type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
+            type=gha,scope=main-${{ matrix.gpu-driver }}
+          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
 
-      - name: Docker Hub Description
-        if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
-        uses: peter-evans/dockerhub-description@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-          repository: ${{ vars.DOCKERHUB_REPOSITORY }}
-          short-description: ${{ github.event.repository.description }}
+      # - name: Docker Hub Description
+      #   if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
+      #   uses: peter-evans/dockerhub-description@v3
+      #   with:
+      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
+      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
+      #     repository: ${{ vars.DOCKERHUB_REPOSITORY }}
+      #     short-description: ${{ github.event.repository.description }}
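With this matrix, each branch or tag build publishes one image per GPU driver, distinguished by the tag suffix configured above. As an illustration only (the exact tag depends on the ref being built and on the registry lowercasing the repository name), pulling the CUDA image built from `main` might look like:

```bash
# illustrative tag name; see the tags/flavor metadata configuration above
docker pull ghcr.io/invoke-ai/invokeai:main-cuda
```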
docker/.env.sample (new file, 13 lines)
## Make a copy of this file named `.env` and fill in the values below.
## Any environment variables supported by InvokeAI can be specified here.

# INVOKEAI_ROOT is the path to a path on the local filesystem where InvokeAI will store data.
# Outputs will also be stored here by default.
# This **must** be an absolute path.
INVOKEAI_ROOT=

HUGGINGFACE_TOKEN=

## optional variables specific to the docker setup
# GPU_DRIVER=cuda
# CONTAINER_UID=1000
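A minimal sketch of how this sample is meant to be used (the runtime path below is only an example, substitute your own absolute path):

```bash
cd docker
cp .env.sample .env
# edit .env and set INVOKEAI_ROOT to an absolute path, e.g. /home/me/invokeai
```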
docker/Dockerfile (rewritten: 107 lines removed, 129 lines added)

Previous Dockerfile (removed):

# syntax=docker/dockerfile:1

ARG PYTHON_VERSION=3.9
##################
## base image ##
##################
FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base

LABEL org.opencontainers.image.authors="mauwii@outlook.de"

# Prepare apt for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache

# Install dependencies
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        libopencv-dev=4.5.*

# Set working directory and env
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE 1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# Don't fall back to legacy build system
ENV PIP_USE_PEP517=1

#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder

# Install build dependencies
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        build-essential=12.9 \
        gcc=4:10.2.* \
        python3-dev=3.9.*

# Prepare pip for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}

# Create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
    python3 -m venv "${APPNAME}" \
        --upgrade-deps

# Install requirements
COPY --link pyproject.toml .
COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
    "${APPNAME}"/bin/pip install .

# Install pyproject.toml
COPY --link . .
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
    "${APPNAME}/bin/pip" install .

# Build patchmatch
RUN python3 -c "from patchmatch import patch_match"

#####################
## runtime image ##
#####################
FROM python-base AS runtime

# Create a new user
ARG UNAME=appuser
RUN useradd \
    --no-log-init \
    -m \
    -U \
    "${UNAME}"

# Create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
    && chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"

# Setup runtime environment
USER ${UNAME}:${UNAME}
COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]

New Dockerfile:

# syntax=docker/dockerfile:1.4

## Builder stage

FROM library/ubuntu:22.04 AS builder

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt-get install -y \
        git \
        python3.10-venv \
        python3-pip \
        build-essential

ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai

ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ARG TORCH_VERSION=2.0.1
ARG TORCHVISION_VERSION=0.15.2
ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
ARG BUILDPLATFORM

WORKDIR ${INVOKEAI_SRC}

# Install pytorch before all other pip packages
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is default
RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m venv ${VIRTUAL_ENV} &&\
    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
    elif [ "$GPU_DRIVER" = "rocm" ]; then \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
    else \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \
    fi &&\
    pip install $extra_index_url_arg \
        torch==$TORCH_VERSION \
        torchvision==$TORCHVISION_VERSION

# Install the local package.
# Editable mode helps use the same image for development:
# the local working copy can be bind-mounted into the image
# at path defined by ${INVOKEAI_SRC}
COPY invokeai ./invokeai
COPY pyproject.toml ./
RUN --mount=type=cache,target=/root/.cache/pip \
    # xformers + triton fails to install on arm64
    if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
        pip install -e ".[xformers]"; \
    else \
        pip install -e "."; \
    fi

# #### Build the Web UI ------------------------------------

FROM node:18 AS web-builder
WORKDIR /build
COPY invokeai/frontend/web/ ./
RUN --mount=type=cache,target=/usr/lib/node_modules \
    npm install --include dev
RUN --mount=type=cache,target=/usr/lib/node_modules \
    yarn vite build

#### Runtime stage ---------------------------------------

FROM library/ubuntu:22.04 AS runtime

ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

RUN apt update && apt install -y --no-install-recommends \
    git \
    curl \
    vim \
    tmux \
    ncdu \
    iotop \
    bzip2 \
    gosu \
    libglib2.0-0 \
    libgl1-mesa-glx \
    python3-venv \
    python3-pip \
    build-essential \
    libopencv-dev \
    libstdc++-10-dev &&\
    apt-get clean && apt-get autoclean

# globally add magic-wormhole
# for ease of transferring data to and from the container
# when running in sandboxed cloud environments; e.g. Runpod etc.
RUN pip install magic-wormhole

ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
ENV INVOKEAI_ROOT=/invokeai
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"

# --link requires buldkit w/ dockerfile syntax 1.4
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist

# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"

WORKDIR ${INVOKEAI_SRC}

# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python3 -c "from patchmatch import patch_match"

# Create unprivileged user and make the local dir
RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT}

COPY docker/docker-entrypoint.sh ./
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
CMD ["invokeai-web", "--host", "0.0.0.0"]
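For reference, a hypothetical direct build of this Dockerfile; the docker compose file added below does the equivalent through its `build:` section, and the tag name and GPU_DRIVER value here are only examples:

```bash
# run from the repository root
docker build . -f docker/Dockerfile -t local/invokeai:latest --build-arg GPU_DRIVER=cuda
```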
docker/README.md (new file, 77 lines)
# InvokeAI Containerized

All commands are to be run from the `docker` directory: `cd docker`

#### Linux

1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`)
2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-compose-on-ubuntu-22-04).
    - The deprecated `docker-compose` (hyphenated) CLI continues to work for now.
3. Ensure the docker daemon is able to access the GPU.
    - You may need to install [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)

#### macOS

1. Ensure Docker has at least 16GB RAM
2. Enable VirtioFS for file sharing
3. Enable `docker compose` V2 support

This is done via Docker Desktop preferences

## Quickstart

1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy .env.sample .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
    a. the desired location of the InvokeAI runtime directory, or
    b. an existing, v3.0.0 compatible runtime directory.
1. `docker compose up`

The image will be built automatically if needed.

The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. The runtime directory will be populated with the base configs and models necessary to start generating.

### Use a GPU

- Linux is *recommended* for GPU support in Docker.
- WSL2 is *required* for Windows.
- Only the `x86_64` architecture is supported.

The Docker daemon on the system must already be set up to use the GPU. On Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as the default. Steps will be different for AMD. Please see the Docker documentation for the most up-to-date instructions for using your GPU with Docker.

## Customize

Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. The next time you run `docker compose up`, your custom values will be used.

You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.

Example (most values are optional):

```
INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
HUGGINGFACE_TOKEN=the_actual_token
CONTAINER_UID=1000
GPU_DRIVER=cuda
```

## Even Moar Customizing!

See the `docker-compose.yml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples are below.

### Reconfigure the runtime directory

This can be used to download additional models from the supported model list.

In conjunction with `INVOKEAI_ROOT`, it can also be used to initialize a runtime directory.

```
command:
  - invokeai-configure
  - --yes
```

Or install models:

```
command:
  - invokeai-model-install
```
docker/build.sh
@@ -1,51 +1,11 @@
 #!/usr/bin/env bash
 set -e
 
-# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
-# e.g. CONTAINER_FLAVOR=cpu ./build.sh
-# Possible Values are:
-# - cpu
-# - cuda
-# - rocm
-# Don't forget to also set it when executing run.sh
-# if it is not set, the script will try to detect the flavor by itself.
-#
-# Doc can be found here:
-# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
-
-SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
-cd "$SCRIPTDIR" || exit 1
-
-source ./env.sh
-
-DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
-
-# print the settings
-echo -e "You are using these values:\n"
-echo -e "Dockerfile:\t\t${DOCKERFILE}"
-echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
-echo -e "Volumename:\t\t${VOLUMENAME}"
-echo -e "Platform:\t\t${PLATFORM}"
-echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
-echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
-echo -e "Container Tag:\t\t${CONTAINER_TAG}"
-echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
-echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
-
-# Create docker volume
-if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
-  echo -e "Volume already exists\n"
-else
-  echo -n "creating docker volume "
-  docker volume create "${VOLUMENAME}"
-fi
-
-# Build Container
-docker build \
-  --platform="${PLATFORM:-linux/amd64}" \
-  --tag="${CONTAINER_IMAGE:-invokeai}" \
-  ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
-  ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
-  ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
-  --file="${DOCKERFILE}" \
-  ..
+build_args=""
+
+[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env)
+
+echo "docker-compose build args:"
+echo $build_args
+
+docker-compose build $build_args
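To make the awk expression concrete, a sketch of the expansion, assuming a `.env` next to the script that contains `GPU_DRIVER=rocm`:

```bash
cd docker
./build.sh
# build_args expands to: --build-arg GPU_DRIVER=rocm
# so the script effectively runs: docker-compose build --build-arg GPU_DRIVER=rocm
```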
docker/docker-compose.yml (new file, 48 lines)
# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr

version: '3.8'

services:
  invokeai:
    image: "local/invokeai:latest"
    # edit below to run on a container runtime other than nvidia-container-runtime.
    # not yet tested with rocm/AMD GPUs
    # Comment out the "deploy" section to run on CPU only
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    build:
      context: ..
      dockerfile: docker/Dockerfile

    # variables without a default will automatically inherit from the host environment
    environment:
      - INVOKEAI_ROOT
      - HF_HOME

    # Create a .env file in the same directory as this docker-compose.yml file
    # and populate it with environment variables. See .env.sample
    env_file:
      - .env

    ports:
      - "${INVOKEAI_PORT:-9090}:9090"
    volumes:
      - ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
      - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
      # - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
      # - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
    tty: true
    stdin_open: true

    # # Example of running alternative commands/scripts in the container
    # command:
    #   - bash
    #   - -c
    #   - |
    #     invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
    #     invokeai-nodes-web --host 0.0.0.0
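A usage sketch: from the `docker` directory the stack can be brought up with an optional host-side port override; 9090 is the default declared in the `ports:` mapping above, and the override variable is read from the environment or `.env`:

```bash
cd docker
INVOKEAI_PORT=9091 docker compose up --build
```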
docker/docker-entrypoint.sh (new executable file, 65 lines)
#!/bin/bash
set -e -o pipefail

### Container entrypoint
# Runs the CMD as defined by the Dockerfile or passed to `docker run`
# Can be used to configure the runtime dir
# Bypass by using ENTRYPOINT or `--entrypoint`

### Set INVOKEAI_ROOT pointing to a valid runtime directory
# Otherwise configure the runtime dir first.

### Configure the InvokeAI runtime directory (done by default)):
# docker run --rm -it <this image> --configure
# or skip with --no-configure

### Set the CONTAINER_UID envvar to match your user.
# Ensures files created in the container are owned by you:
#   docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
# Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.

USER_ID=${CONTAINER_UID:-1000}
USER=invoke
usermod -u ${USER_ID} ${USER} 1>/dev/null

configure() {
    # Configure the runtime directory
    if [[ -f ${INVOKEAI_ROOT}/invokeai.yaml ]]; then
        echo "${INVOKEAI_ROOT}/invokeai.yaml exists. InvokeAI is already configured."
        echo "To reconfigure InvokeAI, delete the above file."
        echo "======================================================================"
    else
        mkdir -p ${INVOKEAI_ROOT}
        chown --recursive ${USER} ${INVOKEAI_ROOT}
        gosu ${USER} invokeai-configure --yes --default_only
    fi
}

## Skip attempting to configure.
## Must be passed first, before any other args.
if [[ $1 != "--no-configure" ]]; then
    configure
else
    shift
fi

### Set the $PUBLIC_KEY env var to enable SSH access.
# We do not install openssh-server in the image by default to avoid bloat.
# but it is useful to have the full SSH server e.g. on Runpod.
# (use SCP to copy files to/from the image, etc)
if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
    apt-get update
    apt-get install -y openssh-server
    pushd $HOME
    mkdir -p .ssh
    echo ${PUBLIC_KEY} > .ssh/authorized_keys
    chmod -R 700 .ssh
    popd
    service ssh start
fi


cd ${INVOKEAI_ROOT}

# Run the CMD as the Container User (not root).
exec gosu ${USER} "$@"
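Taken together with the comments at the top of the script, a hypothetical invocation that keeps file ownership aligned with the host user could look like this; `local/invokeai:latest` is the tag used by docker-compose.yml above, and the mounted path is an example:

```bash
docker run --rm -it \
  -v /some/path:/invokeai \
  -e CONTAINER_UID=$(id -u) \
  local/invokeai:latest
# pass --no-configure as the first argument to skip the automatic runtime-dir setup
```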
docker/env.sh (deleted, 54 lines)

#!/usr/bin/env bash

# This file is used to set environment variables for the build.sh and run.sh scripts.

# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then

  # Activate virtual environment if not already activated and exists
  if [[ -z $VIRTUAL_ENV ]]; then
    [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
      && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
      && echo "Activated virtual environment: $VIRTUAL_ENV"
  fi

  # Decide which container flavor to build if not specified
  if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
    # Check for CUDA and ROCm
    CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
    ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
    if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
      CONTAINER_FLAVOR="cuda"
    elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
      CONTAINER_FLAVOR="rocm"
    else
      CONTAINER_FLAVOR="cpu"
    fi
  fi

  # Set PIP_EXTRA_INDEX_URL based on container flavor
  if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
    PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
  elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
    PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
  # elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
  #   PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
  fi
fi

# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"

# enable docker buildkit
export DOCKER_BUILDKIT=1
docker/run.sh
@@ -1,41 +1,8 @@
 #!/usr/bin/env bash
 set -e
 
-# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
-
 SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1
 
-source ./env.sh
-
-# Create outputs directory if it does not exist
-[[ -d ./outputs ]] || mkdir ./outputs
-
-echo -e "You are using these values:\n"
-echo -e "Volumename:\t${VOLUMENAME}"
-echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
-echo -e "local Models:\t${MODELSPATH:-unset}\n"
-
-docker run \
-  --interactive \
-  --tty \
-  --rm \
-  --platform="${PLATFORM}" \
-  --name="${REPOSITORY_NAME}" \
-  --hostname="${REPOSITORY_NAME}" \
-  --mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
-  --mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
-  ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
-  ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
-  --publish=9090:9090 \
-  --cap-add=sys_nice \
-  ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
-  "${CONTAINER_IMAGE}" ${@:+$@}
-
-echo -e "\nCleaning trash folder ..."
-for f in outputs/.Trash*; do
-  if [ -e "$f" ]; then
-    rm -Rf "$f"
-    break
-  fi
-done
+docker-compose up --build -d
+docker-compose logs -f
docker/runpod-readme.md (new file, 60 lines)
# InvokeAI - A Stable Diffusion Toolkit

Stable Diffusion distribution by InvokeAI: https://github.com/invoke-ai

The Docker image tracks the `main` branch of the InvokeAI project, which means it includes the latest features, but may contain some bugs.

Your working directory is mounted under the `/workspace` path inside the pod. The models are in `/workspace/invokeai/models`, and outputs are in `/workspace/invokeai/outputs`.

> **Only the /workspace directory will persist between pod restarts!**

> **If you _terminate_ (not just _stop_) the pod, the /workspace will be lost.**

## Quickstart

1. Launch a pod from this template. **It will take about 5-10 minutes to run through the initial setup**. Be patient.
1. Wait for the application to load.
    - TIP: you know it's ready when the CPU usage goes idle
    - You can also check the logs for a line that says "_Point your browser at..._"
1. Open the Invoke AI web UI: click the `Connect` => `connect over HTTP` button.
1. Generate some art!

## Other things you can do

At any point you may edit the pod configuration and set an arbitrary Docker command. For example, you could run a command to download some models using `curl`, or fetch some images and place them into your outputs to continue a working session.

If you need to run *multiple commands*, define them in the Docker Command field like this:

`bash -c "cd ${INVOKEAI_ROOT}/outputs; wormhole receive 2-foo-bar; invoke.py --web --host 0.0.0.0"`

### Copying your data in and out of the pod

This image includes a couple of handy tools to help you get data into the pod (such as your custom models or embeddings) and out of the pod (such as downloading your outputs). Here are your options for getting your data in and out of the pod:

- **SSH server**:
    1. Make sure to create and set your Public Key in the RunPod settings (follow the official instructions)
    1. Add an exposed port 22 (TCP) in the pod settings!
    1. When your pod restarts, you will see a new entry in the `Connect` dialog. Use this SSH server to `scp` or `sftp` your files as necessary, or SSH into the pod using the fully fledged SSH server.

- [**Magic Wormhole**](https://magic-wormhole.readthedocs.io/en/latest/welcome.html):
    1. On your computer, `pip install magic-wormhole` (see above instructions for details)
    1. Connect to the command line **using the "light" SSH client** or the browser-based console. _Currently there's a bug where `wormhole` isn't available when connected to the "full" SSH server, as described above_.
    1. `wormhole send /workspace/invokeai/outputs` will send the entire `outputs` directory. You can also send individual files.
    1. Once packaged, you will see a `wormhole receive <123-some-words>` command. Copy it.
    1. Paste this command into the terminal on your local machine to securely download the payload.
    1. It works the same in reverse: you can `wormhole send` some models from your computer to the pod. Again, save your files somewhere in `/workspace` or they will be lost when the pod is stopped.

- **RunPod's Cloud Sync feature** may be used to sync the persistent volume to cloud storage. You could, for example, copy the entire `/workspace` to S3, add some custom models to it, and copy it back from S3 when launching new pod configurations. Follow the Cloud Sync instructions.

### Disable the NSFW checker

The NSFW checker is enabled by default. To disable it, edit the pod configuration and set the following command:

```
invoke --web --host 0.0.0.0 --no-nsfw_checker
```

---

Template ©2023 Eugene Brodsky [ebr](https://github.com/ebr)
invokeai/backend/model_management/model_manager.py
@@ -250,8 +250,8 @@ from .model_cache import ModelCache, ModelLocker
 from .models import (
     BaseModelType, ModelType, SubModelType,
     ModelError, SchedulerPredictionType, MODEL_CLASSES,
-    ModelConfigBase, ModelNotFoundException,
+    ModelConfigBase, ModelNotFoundException, InvalidModelException,
     )
 
 # We are only starting to number the config file with release 3.
 # The config file version doesn't have to start at release version, but it will help
@@ -275,10 +275,6 @@ class ModelInfo():
     def __exit__(self,*args, **kwargs):
         self.context.__exit__(*args, **kwargs)
 
-class InvalidModelError(Exception):
-    "Raised when an invalid model is requested"
-    pass
-
 class AddModelResult(BaseModel):
     name: str = Field(description="The name of the model after installation")
     model_type: ModelType = Field(description="The type of model")
@@ -817,6 +813,8 @@ class ModelManager(object):
                     model_config: ModelConfigBase = model_class.probe_config(str(model_path))
                     self.models[model_key] = model_config
                     new_models_found = True
+                except InvalidModelException:
+                    self.logger.warning(f"Not a valid model: {model_path}")
                 except NotImplementedError as e:
                     self.logger.warning(e)
 
invokeai/backend/model_management/models/__init__.py
@@ -2,7 +2,7 @@ import inspect
 from enum import Enum
 from pydantic import BaseModel
 from typing import Literal, get_origin
-from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException
+from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException, InvalidModelException
 from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
 from .vae import VaeModel
 from .lora import LoRAModel

invokeai/backend/model_management/models/base.py
@@ -15,6 +15,9 @@ from contextlib import suppress
 from pydantic import BaseModel, Field
 from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
 
+class InvalidModelException(Exception):
+    pass
+
 class ModelNotFoundException(Exception):
     pass
 
invokeai/backend/model_management/models/controlnet.py
@@ -13,6 +13,7 @@ from .base import (
     calc_model_size_by_fs,
     calc_model_size_by_data,
     classproperty,
+    InvalidModelException,
 )
 
 class ControlNetModelFormat(str, Enum):
@@ -73,11 +74,19 @@ class ControlNetModel(ModelBase):
 
     @classmethod
     def detect_format(cls, path: str):
-        if os.path.isdir(path):
-            return ControlNetModelFormat.Diffusers
-        else:
-            return ControlNetModelFormat.Checkpoint
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
+        if os.path.isdir(path):
+            if os.path.exists(os.path.join(path, "config.json")):
+                return ControlNetModelFormat.Diffusers
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
+                return ControlNetModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {path}")
 
     @classmethod
     def convert_if_required(
         cls,
invokeai/backend/model_management/models/lora.py
@@ -9,6 +9,7 @@ from .base import (
     ModelType,
     SubModelType,
     classproperty,
+    InvalidModelException,
 )
 # TODO: naming
 from ..lora import LoRAModel as LoRAModelRaw
@@ -56,11 +57,19 @@ class LoRAModel(ModelBase):
 
     @classmethod
     def detect_format(cls, path: str):
-        if os.path.isdir(path):
-            return LoRAModelFormat.Diffusers
-        else:
-            return LoRAModelFormat.LyCORIS
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
+        if os.path.isdir(path):
+            if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
+                return LoRAModelFormat.Diffusers
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return LoRAModelFormat.LyCORIS
+
+        raise InvalidModelException(f"Not a valid model: {path}")
 
     @classmethod
     def convert_if_required(
         cls,
invokeai/backend/model_management/models/stable_diffusion.py
@@ -16,6 +16,7 @@ from .base import (
     SilenceWarnings,
     read_checkpoint_meta,
     classproperty,
+    InvalidModelException,
 )
 from invokeai.app.services.config import InvokeAIAppConfig
 from omegaconf import OmegaConf
@@ -98,11 +99,19 @@ class StableDiffusion1Model(DiffusersModel):
 
     @classmethod
     def detect_format(cls, model_path: str):
-        if os.path.isdir(model_path):
-            return StableDiffusion1ModelFormat.Diffusers
-        else:
-            return StableDiffusion1ModelFormat.Checkpoint
+        if not os.path.exists(model_path):
+            raise ModelNotFoundException()
+
+        if os.path.isdir(model_path):
+            if os.path.exists(os.path.join(model_path, "model_index.json")):
+                return StableDiffusion1ModelFormat.Diffusers
+
+        if os.path.isfile(model_path):
+            if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return StableDiffusion1ModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {model_path}")
 
     @classmethod
     def convert_if_required(
         cls,
@@ -200,11 +209,19 @@ class StableDiffusion2Model(DiffusersModel):
 
     @classmethod
     def detect_format(cls, model_path: str):
-        if os.path.isdir(model_path):
-            return StableDiffusion2ModelFormat.Diffusers
-        else:
-            return StableDiffusion2ModelFormat.Checkpoint
+        if not os.path.exists(model_path):
+            raise ModelNotFoundException()
+
+        if os.path.isdir(model_path):
+            if os.path.exists(os.path.join(model_path, "model_index.json")):
+                return StableDiffusion2ModelFormat.Diffusers
+
+        if os.path.isfile(model_path):
+            if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return StableDiffusion2ModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {model_path}")
 
     @classmethod
     def convert_if_required(
         cls,
invokeai/backend/model_management/models/textual_inversion.py
@@ -9,6 +9,7 @@ from .base import (
     SubModelType,
     classproperty,
     ModelNotFoundException,
+    InvalidModelException,
 )
 # TODO: naming
 from ..lora import TextualInversionModel as TextualInversionModelRaw
@@ -59,8 +60,19 @@ class TextualInversionModel(ModelBase):
 
     @classmethod
     def detect_format(cls, path: str):
-        return None
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
+        if os.path.isdir(path):
+            if os.path.exists(os.path.join(path, "learned_embeds.bin")):
+                return None # diffusers-ti
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return None
+
+        raise InvalidModelException(f"Not a valid model: {path}")
 
     @classmethod
     def convert_if_required(
         cls,
invokeai/backend/model_management/models/vae.py
@@ -15,6 +15,7 @@ from .base import (
     calc_model_size_by_fs,
     calc_model_size_by_data,
     classproperty,
+    InvalidModelException,
 )
 from invokeai.app.services.config import InvokeAIAppConfig
 from diffusers.utils import is_safetensors_available
@@ -75,11 +76,19 @@ class VaeModel(ModelBase):
 
     @classmethod
     def detect_format(cls, path: str):
-        if os.path.isdir(path):
-            return VaeModelFormat.Diffusers
-        else:
-            return VaeModelFormat.Checkpoint
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
+        if os.path.isdir(path):
+            if os.path.exists(os.path.join(path, "config.json")):
+                return VaeModelFormat.Diffusers
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return VaeModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {path}")
 
     @classmethod
     def convert_if_required(
         cls,
invokeai/frontend/web (app config types)
@@ -102,6 +102,8 @@ export type AppFeature =
 export type SDFeature =
   | 'controlNet'
   | 'noise'
+  | 'perlinNoise'
+  | 'noiseThreshold'
   | 'variation'
   | 'symmetry'
   | 'seamless'

invokeai/frontend/web (ParamNoiseCollapse component)
@@ -27,6 +27,9 @@ const ParamNoiseCollapse = () => {
   const { t } = useTranslation();
 
   const isNoiseEnabled = useFeatureStatus('noise').isFeatureEnabled;
+  const isPerlinNoiseEnabled = useFeatureStatus('perlinNoise').isFeatureEnabled;
+  const isNoiseThresholdEnabled =
+    useFeatureStatus('noiseThreshold').isFeatureEnabled;
 
   const { activeLabel } = useAppSelector(selector);
 
@@ -42,8 +45,8 @@ const ParamNoiseCollapse = () => {
       <Flex sx={{ gap: 2, flexDirection: 'column' }}>
         <ParamNoiseToggle />
         <ParamCpuNoiseToggle />
-        <ParamPerlinNoise />
-        <ParamNoiseThreshold />
+        {isPerlinNoiseEnabled && <ParamPerlinNoise />}
+        {isNoiseThresholdEnabled && <ParamNoiseThreshold />}
       </Flex>
     </IAICollapse>
   );

invokeai/frontend/web (config slice initial state)
@@ -6,8 +6,15 @@ import { merge } from 'lodash-es';
 export const initialConfigState: AppConfig = {
   shouldUpdateImagesOnConnect: false,
   disabledTabs: [],
-  disabledFeatures: [],
-  disabledSDFeatures: [],
+  disabledFeatures: ['lightbox', 'faceRestore'],
+  disabledSDFeatures: [
+    'variation',
+    'seamless',
+    'symmetry',
+    'hires',
+    'perlinNoise',
+    'noiseThreshold',
+  ],
   canRestoreDeletedImagesFromBin: true,
   sd: {
     disabledControlNetModels: [],