fix broken Dockerfile (#2445)

also switch to `python:3.9-slim`, since it has far fewer known security issues
Commit: af74a2d1f4
Author: Matthias Wild, 2023-02-01 01:47:25 +01:00 (committed via GitHub)
7 changed files with 140 additions and 119 deletions
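
With this change the workflow publishes one image per flavor to GHCR. Assuming the repository is invoke-ai/InvokeAI (image names are lowercased by docker/metadata-action) and a push to main, pulling the result would look roughly like this sketch; the exact path and tag depend on the repo and ref:

    docker pull ghcr.io/invoke-ai/invokeai-cuda:main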

diff --git a/.dockerignore b/.dockerignore

@@ -1,19 +1,18 @@
 *
+!assets/caution.png
 !backend
-!environments-and-requirements
-!frontend
+!frontend/dist
 !ldm
-!main.py
+!pyproject.toml
+!README.md
 !scripts
-!server
-!static
+!setup.py
 
 # Guard against pulling in any models that might exist in the directory tree
-**/*.pt*
+**.pt*
 
 # unignore configs, but only ignore the custom models.yaml, in case it exists
 !configs
 configs/models.yaml
-configs/models.yaml.orig
 
 **/__pycache__
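
The .dockerignore stays a deny-by-default allow-list: `*` excludes everything, each `!` entry re-admits what the build needs (the entries now track the pyproject-based layout, since the image installs the repo as a package), and `**.pt*` keeps stray model checkpoints out of the context. A quick way to verify what actually reaches the builder, as a sketch (the `ctx-check` tag is arbitrary):

    DOCKER_BUILDKIT=1 docker build --progress=plain --no-cache -t ctx-check -f - . <<'EOF'
    FROM busybox
    COPY . /context
    RUN find /context -maxdepth 1
    EOF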

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml

@@ -3,63 +3,59 @@ on:
   push:
     branches:
       - 'main'
+    tags:
+      - 'v*.*.*'
 
 jobs:
   docker:
     strategy:
       fail-fast: false
       matrix:
-        registry:
-          - ghcr.io
         flavor:
           - amd
           - cuda
-          # - cloud
         include:
           - flavor: amd
-            pip-requirements: requirements-lin-amd.txt
+            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
             dockerfile: docker-build/Dockerfile
             platforms: linux/amd64,linux/arm64
           - flavor: cuda
-            pip-requirements: requirements-lin-cuda.txt
+            pip-extra-index-url: ''
             dockerfile: docker-build/Dockerfile
             platforms: linux/amd64,linux/arm64
-          # - flavor: cloud
-          #   pip-requirements: requirements-lin-cuda.txt
-          #   dockerfile: docker-build/Dockerfile.cloud
-          #   platforms: linux/amd64
 
     runs-on: ubuntu-latest
     name: ${{ matrix.flavor }}
     steps:
       - name: Checkout
         uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
       - name: Docker meta
         id: meta
         uses: docker/metadata-action@v4
         with:
-          images: ${{ matrix.registry }}/${{ github.repository }}-${{ matrix.flavor }}
+          images: ghcr.io/${{ github.repository }}-${{ matrix.flavor }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
             type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
-            type=semver,pattern={{major}}
             type=sha
           flavor: |
             latest=true
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
-      - if: github.event_name != 'pull_request'
-        name: Docker login
+
+      - name: Login to GitHub Container Registry
+        if: github.event_name != 'pull_request'
         uses: docker/login-action@v2
         with:
-          registry: ${{ matrix.registry }}
-          username: ${{ github.actor }}
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
+
       - name: Build container
@@ -71,4 +67,6 @@ jobs:
           push: ${{ github.event_name != 'pull_request' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-          build-args: pip_requirements=${{ matrix.pip-requirements }}
+          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
+          # cache-from: type=gha
+          # cache-to: type=gha,mode=max
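
To reproduce one matrix entry of the CI build locally, a rough equivalent is the following (the tag name is arbitrary; drop the build-arg for the cuda flavor, whose wheels come straight from PyPI):

    docker buildx build \
        --platform linux/amd64 \
        --file docker-build/Dockerfile \
        --build-arg PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/rocm5.2 \
        --tag invokeai-amd:local \
        --load \
        .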

diff --git a/docker-build/Dockerfile b/docker-build/Dockerfile

@@ -1,59 +1,71 @@
-FROM python:3.10-slim AS builder
+# syntax=docker/dockerfile:1
+FROM python:3.9-slim AS python-base
 
 # use bash
 SHELL [ "/bin/bash", "-c" ]
 
 # Install necessary packages
-RUN apt-get update \
+RUN \
+    --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt-get update \
+    && apt-get install -y \
+        --no-install-recommends \
+        libgl1-mesa-glx=20.3.* \
+        libglib2.0-0=2.66.* \
+        libopencv-dev=4.5.* \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+ARG APPDIR=/usr/src/app
+ENV APPDIR ${APPDIR}
+WORKDIR ${APPDIR}
+
+FROM python-base AS builder
+
+RUN \
+    --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         gcc=4:10.2.* \
-        libgl1-mesa-glx=20.3.* \
-        libglib2.0-0=2.66.* \
         python3-dev=3.9.* \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-# set WORKDIR, PATH and copy sources
-ARG APPDIR=/usr/src/app
-WORKDIR ${APPDIR}
-ENV PATH ${APPDIR}/.venv/bin:$PATH
-ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
-COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./
+# copy sources
+COPY --link . .
+
+ARG PIP_EXTRA_INDEX_URL
+ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
 
 # install requirements
-RUN python3 -m venv .venv \
-    && pip install \
-        --upgrade \
-        --no-cache-dir \
-        'wheel>=0.38.4' \
-    && pip install \
-        --no-cache-dir \
-        -r ${PIP_REQUIREMENTS}
+RUN python3 -m venv invokeai \
+    && ${APPDIR}/invokeai/bin/pip \
+        install \
+        --no-cache-dir \
+        --use-pep517 \
+        .
 
-FROM python:3.10-slim AS runtime
+FROM python-base AS runtime
 
 # setup environment
-ARG APPDIR=/usr/src/app
-WORKDIR ${APPDIR}
-COPY --from=builder ${APPDIR} .
-ENV \
-    PATH=${APPDIR}/.venv/bin:$PATH \
-    INVOKEAI_ROOT=/data \
-    INVOKE_MODEL_RECONFIGURE=--yes
+COPY --link . .
+COPY --from=builder ${APPDIR}/invokeai ${APPDIR}/invokeai
+ENV PATH=${APPDIR}/invokeai/bin:$PATH
+ENV INVOKEAI_ROOT=/data
+ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
 
-# Install necessary packages
-RUN apt-get update \
+# build patchmatch
+RUN \
+    --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt-get update \
     && apt-get install -y \
         --no-install-recommends \
         build-essential=12.9 \
-        libgl1-mesa-glx=20.3.* \
-        libglib2.0-0=2.66.* \
-        libopencv-dev=4.5.* \
-    && ln -sf \
-        /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
-        /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
-    && python3 -c "from patchmatch import patch_match" \
+    && PYTHONDONTWRITEBYTECODE=1 \
+        python3 -c "from patchmatch import patch_match" \
     && apt-get remove -y \
         --autoremove \
         build-essential \
@@ -61,5 +73,6 @@ RUN apt-get update \
     && rm -rf /var/lib/apt/lists/*
 
 # set Entrypoint and default CMD
-ENTRYPOINT [ "python3", "scripts/invoke.py" ]
+ENTRYPOINT [ "invoke" ]
 CMD [ "--web", "--host=0.0.0.0" ]
+VOLUME [ "/data" ]
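
The `# syntax=docker/dockerfile:1` line opts into the current Dockerfile frontend, which is what makes `RUN --mount=type=cache` and `COPY --link` legal; both require BuildKit. A minimal sketch of a direct build from the repository root (the build script passes `..` as context for the same reason; the tag is arbitrary):

    DOCKER_BUILDKIT=1 docker build \
        --file docker-build/Dockerfile \
        --build-arg PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu \
        --tag invokeai:dev \
        .

The `sharing=locked` apt cache mounts persist /var/cache/apt and /var/lib/apt across builds, so repeated builds skip re-downloading packages instead of relying on layer caching alone.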

diff --git a/docker-build/build.sh b/docker-build/build.sh

@@ -2,18 +2,25 @@
 set -e
 
 # How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
+#
+# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
+#
+# CUDA 11.6: https://download.pytorch.org/whl/cu116
+# ROCm 5.2:  https://download.pytorch.org/whl/rocm5.2
+# CPU:       https://download.pytorch.org/whl/cpu
+#
+# as found on https://pytorch.org/get-started/locally/
 
-source ./docker-build/env.sh \
-    || echo "please execute docker-build/build.sh from repository root" \
-    || exit 1
+cd "$(dirname "$0")" || exit 1
 
-PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
-DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
+source ./env.sh
+
+DOCKERFILE=${INVOKE_DOCKERFILE:-"./Dockerfile"}
 
 # print the settings
 echo -e "You are using these values:\n"
 echo -e "Dockerfile:\t ${DOCKERFILE}"
-echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
+echo -e "extra-index-url: ${PIP_EXTRA_INDEX_URL:-none}"
 echo -e "Volumename:\t ${VOLUMENAME}"
 echo -e "arch:\t\t ${ARCH}"
 echo -e "Platform:\t ${PLATFORM}"
@@ -30,6 +37,6 @@ fi
 docker build \
     --platform="${PLATFORM}" \
     --tag="${INVOKEAI_TAG}" \
-    --build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
+    ${PIP_EXTRA_INDEX_URL:+--build-arg=PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL}"} \
     --file="${DOCKERFILE}" \
-    .
+    ..
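
Example invocations; the script now cd's into its own directory first, so it can be called from anywhere:

    # ROCm flavor: pass the extra index explicitly
    PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/rocm5.2 ./docker-build/build.sh

    # CUDA 11.7: wheels come from PyPI, so no extra index is needed
    ./docker-build/build.sh

The `${PIP_EXTRA_INDEX_URL:+--build-arg=...}` expansion emits the --build-arg only when the variable is set, so the unset case passes no argument at all rather than an empty one.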

diff --git a/docker-build/env.sh b/docker-build/env.sh

@@ -7,4 +7,4 @@ ARCH=${ARCH:-$(uname -m)}
 PLATFORM=${PLATFORM:-Linux/${ARCH}}
 CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
 INVOKEAI_BRANCH=$(git branch --show)
-INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH/\//-}}
+INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH##*/}}
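
The tag suffix now keeps only the last path segment of the branch name instead of replacing the first slash. A small bash demonstration of the expansions involved:

    INVOKEAI_BRANCH='bugfix/docker-build'
    echo "${INVOKEAI_BRANCH/\//-}"    # old behavior: bugfix-docker-build
    echo "${INVOKEAI_BRANCH##*/}"     # new behavior: docker-build

    REPOSITORY_NAME='InvokeAI'
    echo "${REPOSITORY_NAME,,}"       # invokeai (image names must be lowercase)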

diff --git a/docker-build/run.sh b/docker-build/run.sh

@@ -4,17 +4,14 @@ set -e
 
 # How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
 # IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
 
-source ./docker-build/env.sh \
-    || echo "please run from repository root" \
-    || exit 1
+cd "$(dirname "$0")" || exit 1
 
-# check if HUGGINGFACE_TOKEN is available
-# You must have accepted the terms of use for required models
-HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}
+source ./env.sh
 
 echo -e "You are using these values:\n"
 echo -e "Volumename:\t${VOLUMENAME}"
-echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
+echo -e "Invokeai_tag:\t${INVOKEAI_TAG}"
+echo -e "local Models:\t${MODELSPATH:-unset}\n"
 
 docker run \
     --interactive \
@@ -23,8 +20,10 @@ docker run \
     --platform="$PLATFORM" \
     --name="${REPOSITORY_NAME,,}" \
     --hostname="${REPOSITORY_NAME,,}" \
-    --mount="source=$VOLUMENAME,target=/data" \
-    --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
+    --mount=source="$VOLUMENAME",target=/data \
+    ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
+    ${MODELSPATH:+--mount=type=bind,source=${MODELSPATH},target=/data/models} \
+    ${HUGGING_FACE_HUB_TOKEN:+--env=HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}} \
     --publish=9090:9090 \
     --cap-add=sys_nice \
     ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
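
Both the local model bind-mount and the Hugging Face token are optional now; the `${VAR:+...}` guards expand to nothing when the variable is unset. When MODELSPATH is set, the container additionally runs as the invoking user so files written to the bind mount keep sane ownership. A usage sketch (the token value and models path are placeholders):

    MODELSPATH="$HOME/invokeai-models" \
    HUGGING_FACE_HUB_TOKEN="<your-hf-token>" \
        ./docker-build/run.sh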

diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py

@@ -21,6 +21,34 @@ import ldm.invoke
 
 # global used in multiple functions (fix)
 infile = None
 
+def report_model_error(opt:Namespace, e:Exception):
+    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
+    print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
+    # Ask interactively unless INVOKE_MODEL_RECONFIGURE contains --yes;
+    # use .get() so an unset variable does not raise KeyError.
+    if '--yes' not in os.environ.get('INVOKE_MODEL_RECONFIGURE', '').split():
+        response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
+        if response.startswith(('n','N')):
+            return
+
+    print('configure_invokeai is launching....\n')
+
+    # Match arguments that were set on the CLI;
+    # only the arguments accepted by the configuration script are parsed.
+    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
+    config = ["--config", opt.conf] if opt.conf is not None else []
+    yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE', '').split()
+
+    sys.argv = [ 'configure_invokeai' ]
+    sys.argv.extend(root_dir)
+    sys.argv.extend(config)
+    sys.argv.extend(yes_to_all)
+
+    import ldm.invoke.configure_invokeai as configure_invokeai
+    # Propagate the configuration script's exit status; the container
+    # runtime (or an outer supervisor) restarts InvokeAI afterwards.
+    sys.exit(configure_invokeai.main())
+
 def main():
     """Initialize command-line parsers and the diffusion model"""
     global infile
@@ -50,10 +78,8 @@ def main():
     if not args.conf:
         if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
-            print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
-            print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
-            print('** This script will now exit.')
-            sys.exit(-1)
+            # offer (re)configuration instead of exiting outright
+            report_model_error(opt, FileNotFoundError(f"The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found."))
 
     print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -1097,34 +1123,6 @@ def write_commands(opt, file_path:str, outfilepath:str):
         f.write('\n'.join(commands))
         print(f'>> File {outfilepath} with commands created')
 
-def report_model_error(opt:Namespace, e:Exception):
-    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
-    print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
-    response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
-    if response.startswith(('n','N')):
-        return
-
-    print('configure_invokeai is launching....\n')
-
-    # Match arguments that were set on the CLI
-    # only the arguments accepted by the configuration script are parsed
-    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
-    config = ["--config", opt.conf] if opt.conf is not None else []
-    yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
-    previous_args = sys.argv
-    sys.argv = [ 'configure_invokeai' ]
-    sys.argv.extend(root_dir)
-    sys.argv.extend(config)
-    if yes_to_all is not None:
-        sys.argv.append(yes_to_all)
-
-    import ldm.invoke.configure_invokeai as configure_invokeai
-    configure_invokeai.main()
-
-    print('** InvokeAI will now restart')
-    sys.argv = previous_args
-    main() # would rather do a os.exec(), but doesn't exist?
-    sys.exit(0)
 
 def check_internet()->bool:
     '''
     Return true if the internet is reachable.
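
The Dockerfile sets INVOKE_MODEL_RECONFIGURE="--yes --default_only", so when model initialization fails inside the container, report_model_error() skips the interactive prompt and forwards those flags to configure_invokeai. A sketch of the same behavior without the helper scripts (image and volume names are placeholders):

    docker run --rm \
        --env INVOKE_MODEL_RECONFIGURE='--yes --default_only' \
        --mount=source=invokeai,target=/data \
        --publish=9090:9090 \
        invokeai-cuda:main --web --host=0.0.0.0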