Merge branch 'main' into patch-2

Lincoln Stein 2023-02-03 18:03:29 -05:00 committed by GitHub
commit 4500c8b244
12 changed files with 154 additions and 288 deletions

@@ -1,18 +1,20 @@
# use this file as a whitelist
*
!assets/caution.png
!backend
!frontend/dist
!invokeai
!ldm
!pyproject.toml
!README.md
!scripts
# Guard against pulling in any models that might exist in the directory tree
**.pt*
**/*.pt*
**/*.ckpt
# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml
configs/models.yaml.orig
# whitelist frontend, but ignore node_modules
invokeai/frontend/node_modules
# ignore python cache
**/__pycache__
**/*.py[cod]
**/*.egg-info
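
The file above works as a whitelist: the leading "*" ignores everything, and the "!" entries re-include only what the image build needs. A quick way to check what actually ends up in the build context is to feed Docker a throwaway Dockerfile on stdin; a minimal sketch, assuming BuildKit is enabled (the inline Dockerfile is a hypothetical one-off, not part of the repository):

# List what the whitelist admits into the build context (illustrative only).
printf 'FROM busybox\nCOPY . /ctx\nRUN find /ctx -maxdepth 2 | sort\n' \
    | docker build --no-cache --progress=plain -f - .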

.github/CODEOWNERS

@@ -4,4 +4,4 @@ scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @ebr
.github/workflows/ @mauwii
docker_build/ @mauwii
docker/ @mauwii

@@ -1,88 +0,0 @@
name: Build and push cloud image
on:
  workflow_dispatch:
  # push:
  #   branches:
  #     - main
  #   tags:
  #     - v*
  # # we will NOT push the image on pull requests, only test buildability.
  # pull_request:
  #   branches:
  #     - main
permissions:
  contents: read
  packages: write
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
jobs:
  docker:
    if: github.event.pull_request.draft == false
    strategy:
      fail-fast: false
      matrix:
        arch:
          - x86_64
          # requires resolving a patchmatch issue
          # - aarch64
    runs-on: ubuntu-latest
    name: ${{ matrix.arch }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        if: matrix.arch == 'aarch64'
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # see https://github.com/docker/metadata-action
          # will push the following tags:
          # :edge
          # :main (+ any other branches enabled in the workflow)
          # :<tag>
          # :1.2.3 (for semver tags)
          # :1.2 (for semver tags)
          # :<sha>
          tags: |
            type=edge,branch=main
            type=ref,event=branch
            type=ref,event=tag
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=sha
          # suffix image tags with architecture
          flavor: |
            latest=auto
            suffix=-${{ matrix.arch }},latest=true
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      # do not login to container registry on PRs
      - if: github.event_name != 'pull_request'
        name: Docker login
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push cloud image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile.cloud
          platforms: Linux/${{ matrix.arch }}
          # do not push the image on PRs
          push: false
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

@@ -15,14 +15,19 @@ jobs:
        flavor:
          - amd
          - cuda
          - cpu
        include:
          - flavor: amd
            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
            dockerfile: docker-build/Dockerfile
            dockerfile: docker/Dockerfile
            platforms: linux/amd64,linux/arm64
          - flavor: cuda
            pip-extra-index-url: ''
            dockerfile: docker-build/Dockerfile
            dockerfile: docker/Dockerfile
            platforms: linux/amd64,linux/arm64
          - flavor: cpu
            pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
            dockerfile: docker/Dockerfile
            platforms: linux/amd64,linux/arm64
    runs-on: ubuntu-latest
    name: ${{ matrix.flavor }}
@@ -34,7 +39,8 @@ jobs:
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}-${{ matrix.flavor }}
          github-token: ${{ secrets.GITHUB_TOKEN }}
          images: ghcr.io/${{ github.repository }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
@@ -43,7 +49,8 @@ jobs:
            type=semver,pattern={{major}}
            type=sha
          flavor: |
            latest=true
            latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
            suffix=${{ matrix.flavor }},onlatest=false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
@@ -69,5 +76,15 @@ jobs:
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
          # cache-from: type=gha
          # cache-to: type=gha,mode=max
          cache-from: type=gha
          cache-to: type=gha,mode=max
      - name: Output image, digest and metadata to summary
        run: |
          {
            echo imageid: "${{ steps.docker_build.outputs.imageid }}"
            echo digest: "${{ steps.docker_build.outputs.digest }}"
            echo labels: "${{ steps.meta.outputs.labels }}"
            echo tags: "${{ steps.meta.outputs.tags }}"
            echo version: "${{ steps.meta.outputs.version }}"
          } >> "$GITHUB_STEP_SUMMARY"

@@ -1,86 +0,0 @@
#######################
#### Builder stage ####
FROM library/ubuntu:22.04 AS builder
ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt-get install -y \
git \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
python3-opencv \
libopencv-dev
# This is needed for patchmatch support
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
ln -sf opencv4.pc opencv.pc
ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN --mount=type=cache,target=/root/.cache/pip \
python3 -m venv ${VIRTUAL_ENV} &&\
pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
torch==1.12.0+cu116 \
torchvision==0.13.0+cu116 &&\
pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
pip install -r requirements.txt &&\
pip install -e .
#######################
#### Runtime stage ####
FROM library/ubuntu:22.04 as runtime
ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt install -y --no-install-recommends \
git \
curl \
ncdu \
iotop \
bzip2 \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
python3-opencv \
libopencv-dev &&\
apt-get clean && apt-get autoclean
ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}
ENV INVOKEAI_ROOT=/mnt/invokeai
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
COPY --from=builder ${WORKDIR} ${WORKDIR}
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig
# build patchmatch
RUN python -c "from patchmatch import patch_match"
## workaround for non-existent initfile when runtime directory is mounted; see #1613
RUN touch /root/.invokeai
ENTRYPOINT ["bash"]
CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]

@@ -1,44 +0,0 @@
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai
IMAGE=local/invokeai:latest
USER=$(shell id -u)
GROUP=$(shell id -g)
# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.
build:
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
configure:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
${IMAGE} -c "python scripts/configure_invokeai.py"
# Run the container with the runtime dir mounted and the web server exposed on port 9090
web:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
-p 9090:9090 \
${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
# Run the cli with the runtime dir mounted
cli:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
${IMAGE} -c "python scripts/invoke.py"
# Run the container with the runtime dir mounted and open a bash shell
shell:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --
.PHONY: build configure web cli shell

@@ -1,10 +0,0 @@
#!/usr/bin/env bash
# Variables shared by build.sh and run.sh
REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
ARCH=${ARCH:-$(uname -m)}
PLATFORM=${PLATFORM:-Linux/${ARCH}}
CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
INVOKEAI_BRANCH=$(git branch --show)
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH##*/}}

@@ -1,8 +1,12 @@
# syntax=docker/dockerfile:1
FROM python:3.9-slim AS python-base
# use bash
SHELL [ "/bin/bash", "-c" ]
# Maintained by Matthias Wild <mauwii@outlook.de>
ARG PYTHON_VERSION=3.9
##################
### base image ###
##################
FROM python:${PYTHON_VERSION}-slim AS python-base
# Install necessary packages
RUN \
@@ -17,12 +21,39 @@ RUN \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG APPDIR=/usr/src/app
ENV APPDIR ${APPDIR}
# set working directory and path
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
FROM python-base AS builder
######################
### build frontend ###
######################
FROM node:lts as frontend-builder
# Copy Sources
ARG APPDIR=/usr/src
WORKDIR ${APPDIR}
COPY --link . .
# install dependencies and build frontend
WORKDIR ${APPDIR}/invokeai/frontend
RUN \
--mount=type=cache,target=/usr/local/share/.cache/yarn/v6 \
yarn install \
--prefer-offline \
--frozen-lockfile \
--non-interactive \
--production=false \
&& yarn build
###################################
### install python dependencies ###
###################################
FROM python-base AS pyproject-builder
# Install dependencies
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -34,25 +65,28 @@ RUN \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# create virtual environment
RUN python3 -m venv "${APPNAME}" \
--upgrade-deps
# copy sources
COPY --link . .
COPY --from=frontend-builder ${APPDIR} .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
"${APPDIR}/${APPNAME}/bin/pip" install \
--use-pep517 \
.
# install requirements
RUN python3 -m venv invokeai \
&& ${APPDIR}/invokeai/bin/pip \
install \
--no-cache-dir \
--use-pep517 \
.
#####################
### runtime image ###
#####################
FROM python-base AS runtime
# setup environment
COPY --link . .
COPY --from=builder ${APPDIR}/invokeai ${APPDIR}/invokeai
ENV PATH=${APPDIR}/invokeai/bin:$PATH
COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
@@ -73,6 +107,6 @@ RUN \
&& rm -rf /var/lib/apt/lists/*
# set Entrypoint and default CMD
ENTRYPOINT [ "invoke" ]
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]
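
The stages above are normally driven through docker/build.sh and docker/run.sh (see the diffs that follow), but the image can also be built and run directly. A minimal sketch, assuming a checkout of the repository as build context and an NVIDIA host; the tag and volume name are illustrative, not defined by this commit:

# Build the runtime image; for ROCm or CPU builds pass the matching
# --build-arg PIP_EXTRA_INDEX_URL=... listed in docker/build.sh.
docker build -f docker/Dockerfile -t invokeai:dev .
# Run it with the declared /data volume backed by a named volume and the
# web port published, mirroring what docker/run.sh does.
docker run --rm -it --gpus=all -p 9090:9090 -v invokeai_data:/data invokeai:dev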

@@ -2,30 +2,31 @@
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
#
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
#
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
#
# as found on https://pytorch.org/get-started/locally/
cd "$(dirname "$0")" || exit 1
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
DOCKERFILE=${INVOKE_DOCKERFILE:-"./Dockerfile"}
DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t ${DOCKERFILE}"
echo -e "extra-index-url: ${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "arch:\t\t ${ARCH}"
echo -e "Platform:\t ${PLATFORM}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
echo -e "Dockerfile: \t${DOCKERFILE}"
echo -e "index-url: \t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename: \t${VOLUMENAME}"
echo -e "Platform: \t${PLATFORM}"
echo -e "Registry: \t${CONTAINER_REGISTRY}"
echo -e "Repository: \t${CONTAINER_REPOSITORY}"
echo -e "Container Tag: \t${CONTAINER_TAG}"
echo -e "Container Image: ${CONTAINER_IMAGE}\n"
# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
@@ -36,7 +37,7 @@ fi
# Build Container
docker build \
--platform="${PLATFORM}" \
--tag="${INVOKEAI_TAG}" \
${PIP_EXTRA_INDEX_URL:+--build-arg=PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL}"} \
--tag="${CONTAINER_IMAGE}" \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
--file="${DOCKERFILE}" \
..
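
A usage sketch for the updated build script, assuming it is invoked from a repository checkout; the flavor override is optional, since env.sh (added below) otherwise detects one from the host:

# Build a ROCm-flavored image; env.sh maps the flavor to the matching
# PIP_EXTRA_INDEX_URL and composes the CONTAINER_IMAGE tag the script prints.
CONTAINER_FLAVOR=rocm ./docker/build.sh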

docker/env.sh

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
    # Decide which container flavor to build if not specified
    if [[ -z "$CONTAINER_FLAVOR" ]]; then
        # Check for CUDA and ROCm
        CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
        ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
        if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR=cuda
        elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR="rocm"
        else
            CONTAINER_FLAVOR="cpu"
        fi
    fi
    # Set PIP_EXTRA_INDEX_URL based on container flavor
    if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
        PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/rocm"}"
    elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
        PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/cpu"}"
    fi
fi
# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
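
Every assignment above uses the ${VAR-default} form, so any value can be overridden from the caller's environment, and the final image reference is the lower-cased registry/repository:tag concatenation. A hypothetical example, assuming user jdoe on branch main with a CUDA-capable host:

# Naming against a different registry (values are illustrative).
CONTAINER_REGISTRY=docker.io CONTAINER_REPOSITORY=jdoe/invokeai ./docker/build.sh
# With no overrides, the composed reference would be roughly:
#   CONTAINER_IMAGE=ghcr.io/jdoe/invokeai:main-cuda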

@@ -4,27 +4,28 @@ set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
cd "$(dirname "$0")" || exit 1
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${INVOKEAI_TAG}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
echo -e "local Models:\t${MODELSPATH:-unset}\n"
docker run \
--interactive \
--tty \
--rm \
--platform="$PLATFORM" \
--platform="${PLATFORM}" \
--name="${REPOSITORY_NAME,,}" \
--hostname="${REPOSITORY_NAME,,}" \
--mount=source="$VOLUMENAME",target=/data \
--mount=source="${VOLUMENAME}",target=/data \
${MODELSPATH:+-u "$(id -u):$(id -g)"} \
${MODELSPATH:+--mount=type=bind,source=${MODELSPATH},target=/data/models} \
${HUGGING_FACE_HUB_TOKEN:+--env=HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}} \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
"$INVOKEAI_TAG" ${1:+$@}
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${1:+$@}

@@ -50,7 +50,7 @@ def main():
    Globals.internet_available = args.internet_available and check_internet()
    Globals.disable_xformers = not args.xformers
    Globals.ckpt_convert = args.ckpt_convert
    print(f'>> Internet connectivity is {Globals.internet_available}')
    if not args.conf:
@@ -1111,9 +1111,13 @@ def write_commands(opt, file_path:str, outfilepath:str):
def report_model_error(opt:Namespace, e:Exception):
    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
    print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
    response = input('Do you want to run invokeai-configure script to select and/or reinstall models? [y] ')
    if response.startswith(('n','N')):
        return
    yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
    if yes_to_all:
        print('** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE')
    else:
        response = input('Do you want to run invokeai-configure script to select and/or reinstall models? [y] ')
        if response.startswith(('n', 'N')):
            return
    print('invokeai-configure is launching....\n')
@@ -1121,13 +1125,13 @@ def report_model_error(opt:Namespace, e:Exception):
    # only the arguments accepted by the configuration script are parsed
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
    previous_args = sys.argv
    sys.argv = [ 'invokeai-configure' ]
    sys.argv.extend(root_dir)
    sys.argv.extend(config)
    if yes_to_all is not None:
        sys.argv.append(yes_to_all)
        for arg in yes_to_all.split():
            sys.argv.append(arg)
    from ldm.invoke.config import configure_invokeai
    configure_invokeai.main()
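
This is the hook the container image relies on: docker/Dockerfile sets INVOKE_MODEL_RECONFIGURE="--yes --default_only", and on a model error the CLI now splits that value into extra arguments for invokeai-configure instead of prompting. The same behaviour can be forced outside the container; a minimal sketch, with the flags taken from the Dockerfile:

# Run non-interactively; if model initialization fails, invokeai-configure is
# invoked with the flags below instead of waiting for a y/n prompt.
INVOKE_MODEL_RECONFIGURE="--yes --default_only" invokeai --web --host=0.0.0.0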