Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
fix broken Dockerfile (#2445)

also switch to `python:3.9-slim`, since it has far fewer security issues
Commit af74a2d1f4
@@ -1,19 +1,18 @@
*
!assets/caution.png
!backend
!environments-and-requirements
!frontend
!frontend/dist
!ldm
!main.py
!pyproject.toml
!README.md
!scripts
!server
!static
!setup.py

# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**.pt*

# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml
configs/models.yaml.orig

**/__pycache__
.github/workflows/build-container.yml (vendored), 34 changed lines
@@ -3,63 +3,59 @@ on:
  push:
    branches:
      - 'main'
    tags:
      - 'v*.*.*'

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        registry:
          - ghcr.io
        flavor:
          - amd
          - cuda
          # - cloud
        include:
          - flavor: amd
            pip-requirements: requirements-lin-amd.txt
            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
            dockerfile: docker-build/Dockerfile
            platforms: linux/amd64,linux/arm64
          - flavor: cuda
            pip-requirements: requirements-lin-cuda.txt
            pip-extra-index-url: ''
            dockerfile: docker-build/Dockerfile
            platforms: linux/amd64,linux/arm64
          # - flavor: cloud
          #   pip-requirements: requirements-lin-cuda.txt
          #   dockerfile: docker-build/Dockerfile.cloud
          #   platforms: linux/amd64
    runs-on: ubuntu-latest
    name: ${{ matrix.flavor }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ matrix.registry }}/${{ github.repository }}-${{ matrix.flavor }}
          images: ghcr.io/${{ github.repository }}-${{ matrix.flavor }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
          flavor: |
            latest=true

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - if: github.event_name != 'pull_request'
        name: Docker login
      - name: Login to GitHub Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
          registry: ${{ matrix.registry }}
          username: ${{ github.actor }}
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build container
@@ -71,4 +67,6 @@ jobs:
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: pip_requirements=${{ matrix.pip-requirements }}
          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
          # cache-from: type=gha
          # cache-to: type=gha,mode=max
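Each matrix entry above pairs a container flavor with a requirements file and an optional pip extra-index URL, and the workflow now forwards that URL to the image build as the PIP_EXTRA_INDEX_URL build-arg. A rough local equivalent of the amd entry might look like the sketch below; the tag and the single platform are illustrative choices, not what the workflow itself produces.

```bash
# Hypothetical one-off build mirroring the CI "amd" matrix entry.
# The extra index URL reaches pip inside the Dockerfile via ARG/ENV.
docker build \
  --file docker-build/Dockerfile \
  --platform linux/amd64 \
  --build-arg PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/rocm5.2 \
  --tag invokeai-amd:local \
  .
```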
@@ -1,59 +1,71 @@
FROM python:3.10-slim AS builder
# syntax=docker/dockerfile:1
FROM python:3.9-slim AS python-base

# use bash
SHELL [ "/bin/bash", "-c" ]

# Install necesarry packages
RUN apt-get update \
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        libopencv-dev=4.5.* \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

ARG APPDIR=/usr/src/app
ENV APPDIR ${APPDIR}
WORKDIR ${APPDIR}

FROM python-base AS builder

RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        gcc=4:10.2.* \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        python3-dev=3.9.* \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# set WORKDIR, PATH and copy sources
ARG APPDIR=/usr/src/app
WORKDIR ${APPDIR}
ENV PATH ${APPDIR}/.venv/bin:$PATH
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./
# copy sources
COPY --link . .
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}

# install requirements
RUN python3 -m venv .venv \
    && pip install \
        --upgrade \
RUN python3 -m venv invokeai \
    && ${APPDIR}/invokeai/bin/pip \
        install \
        --no-cache-dir \
        'wheel>=0.38.4' \
    && pip install \
        --no-cache-dir \
        -r ${PIP_REQUIREMENTS}
        --use-pep517 \
        .

FROM python:3.10-slim AS runtime
FROM python-base AS runtime

# setup environment
ARG APPDIR=/usr/src/app
WORKDIR ${APPDIR}
COPY --from=builder ${APPDIR} .
ENV \
    PATH=${APPDIR}/.venv/bin:$PATH \
    INVOKEAI_ROOT=/data \
    INVOKE_MODEL_RECONFIGURE=--yes
COPY --link . .
COPY --from=builder ${APPDIR}/invokeai ${APPDIR}/invokeai
ENV PATH=${APPDIR}/invokeai/bin:$PATH
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"

# Install necesarry packages
RUN apt-get update \
# build patchmatch
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        build-essential=12.9 \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        libopencv-dev=4.5.* \
    && ln -sf \
        /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
        /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
    && python3 -c "from patchmatch import patch_match" \
    && PYTHONDONTWRITEBYTECODE=1 \
        python3 -c "from patchmatch import patch_match" \
    && apt-get remove -y \
        --autoremove \
        build-essential \
@@ -61,5 +73,6 @@ RUN apt-get update \
    && rm -rf /var/lib/apt/lists/*

# set Entrypoint and default CMD
ENTRYPOINT [ "python3", "scripts/invoke.py" ]
ENTRYPOINT [ "invoke" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]
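The builder stage works because pip reads the PIP_EXTRA_INDEX_URL environment variable directly, so the ARG/ENV pair applies the flavor-specific wheel index to the `pip install --use-pep517 .` step without any extra command-line flag. A minimal sketch of the same mechanism outside Docker, assuming it is run from the repository root with Python 3.9+ available:

```bash
# Illustrative only: reproduce the builder stage's install steps on the host.
export PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/rocm5.2  # example URL
python3 -m venv invokeai                                  # venv named like the one in the image
invokeai/bin/pip install --no-cache-dir 'wheel>=0.38.4'
invokeai/bin/pip install --no-cache-dir --use-pep517 .    # pip picks up the env var
```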
@@ -2,34 +2,41 @@
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
#
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
#
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
#
# as found on https://pytorch.org/get-started/locally/

source ./docker-build/env.sh \
    || echo "please execute docker-build/build.sh from repository root" \
    || exit 1
cd "$(dirname "$0")" || exit 1

PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
source ./env.sh

DOCKERFILE=${INVOKE_DOCKERFILE:-"./Dockerfile"}

# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t ${DOCKERFILE}"
echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
echo -e "extra-index-url: ${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "arch:\t\t ${ARCH}"
echo -e "Platform:\t ${PLATFORM}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"

if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
  echo -e "Volume already exists\n"
  echo -e "Volume already exists\n"
else
  echo -n "createing docker volume "
  docker volume create "${VOLUMENAME}"
  echo -n "createing docker volume "
  docker volume create "${VOLUMENAME}"
fi

# Build Container
docker build \
  --platform="${PLATFORM}" \
  --tag="${INVOKEAI_TAG}" \
  --build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
  --file="${DOCKERFILE}" \
  .
  --platform="${PLATFORM}" \
  --tag="${INVOKEAI_TAG}" \
  ${PIP_EXTRA_INDEX_URL:+--build-arg=PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL}"} \
  --file="${DOCKERFILE}" \
  ..
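The `${PIP_EXTRA_INDEX_URL:+--build-arg=...}` expansion only emits the build-arg when the variable is set, so a default CUDA 11.7 build can omit it entirely, and the new `cd "$(dirname "$0")"` means the script no longer has to be started from the repository root. A hypothetical invocation with explicit overrides (the values are examples, not project defaults):

```bash
# Build a ROCm-flavored image from a clone of the repository.
PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/rocm5.2 \
CONTAINER_FLAVOR=amd \
  ./docker-build/build.sh
```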
@@ -7,4 +7,4 @@ ARCH=${ARCH:-$(uname -m)}
PLATFORM=${PLATFORM:-Linux/${ARCH}}
CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
INVOKEAI_BRANCH=$(git branch --show)
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH/\//-}}
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH##*/}}
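The tag is assembled from shell parameter expansions: `${REPOSITORY_NAME,,}` lowercases the repository name, and the branch suffix changes from replacing the first slash (old line) to keeping only the text after the last slash (new line). A small illustration with made-up values:

```bash
# Demonstration of the expansions used in env.sh (values are invented).
REPOSITORY_NAME=InvokeAI
INVOKEAI_BRANCH=bugfix/broken-dockerfile
echo "${REPOSITORY_NAME,,}"        # invokeai
echo "${INVOKEAI_BRANCH/\//-}"     # bugfix-broken-dockerfile  (old behaviour)
echo "${INVOKEAI_BRANCH##*/}"      # broken-dockerfile         (new behaviour)
```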
@@ -4,17 +4,14 @@ set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!

source ./docker-build/env.sh \
    || echo "please run from repository root" \
    || exit 1
cd "$(dirname "$0")" || exit 1

# check if HUGGINGFACE_TOKEN is available
# You must have accepted the terms of use for required models
HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}
source ./env.sh

echo -e "You are using these values:\n"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${INVOKEAI_TAG}"
echo -e "local Models:\t${MODELSPATH:-unset}\n"

docker run \
  --interactive \
@@ -23,8 +20,10 @@ docker run \
  --platform="$PLATFORM" \
  --name="${REPOSITORY_NAME,,}" \
  --hostname="${REPOSITORY_NAME,,}" \
  --mount="source=$VOLUMENAME,target=/data" \
  --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
  --mount=source="$VOLUMENAME",target=/data \
  ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
  ${MODELSPATH:+--mount=type=bind,source=${MODELSPATH},target=/data/models} \
  ${HUGGING_FACE_HUB_TOKEN:+--env=HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}} \
  --publish=9090:9090 \
  --cap-add=sys_nice \
  ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
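run.sh now treats both the Hugging Face token and a local models directory as optional: each `${VAR:+...}` expansion adds its docker flag only when the corresponding variable is set. A hypothetical invocation with both provided (the token and path are placeholders):

```bash
# Run the container with a token and a bind-mounted local models directory.
HUGGING_FACE_HUB_TOKEN=hf_xxxxxxxxxxxxxxxx \
MODELSPATH=/srv/invokeai/models \
  ./docker-build/run.sh
```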
@@ -21,6 +21,38 @@ import ldm.invoke
# global used in multiple functions (fix)
infile = None

def report_model_error(opt:Namespace, e:Exception):
    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
    print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
    if not str("--yes") in os.environ['INVOKE_MODEL_RECONFIGURE'].split():
        response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
        if response.startswith(('n','N')):
            return

    print('configure_invokeai is launching....\n')

    # Match arguments that were set on the CLI
    # only the arguments accepted by the configuration script are parsed
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    if os.getenv('INVOKE_MODEL_RECONFIGURE'):
        yes_to_all = os.environ['INVOKE_MODEL_RECONFIGURE'].split()
    else:
        yes_to_all = None
    previous_args = sys.argv
    sys.argv = [ 'configure_invokeai' ]
    sys.argv.extend(root_dir)
    sys.argv.extend(config)
    if yes_to_all is not None:
        for argv in yes_to_all:
            sys.argv.append(argv)

    import ldm.invoke.configure_invokeai as configure_invokeai
    sys.exit(configure_invokeai.main())
    print('** InvokeAI will now restart')
    sys.argv = previous_args
    sys.exit(main()) # would rather do a os.exec(), but doesn't exist?

def main():
    """Initialize command-line parsers and the diffusion model"""
    global infile
@@ -50,10 +82,11 @@ def main():

    if not args.conf:
        if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
            print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
            print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
            print('** This script will now exit.')
            sys.exit(-1)
            report_model_error(opt, e)
            # print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
            # print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
            # print('** This script will now exit.')
            # sys.exit(-1)

    print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
    print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -1097,34 +1130,6 @@ def write_commands(opt, file_path:str, outfilepath:str):
        f.write('\n'.join(commands))
    print(f'>> File {outfilepath} with commands created')

def report_model_error(opt:Namespace, e:Exception):
    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
    print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
    response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
    if response.startswith(('n','N')):
        return

    print('configure_invokeai is launching....\n')

    # Match arguments that were set on the CLI
    # only the arguments accepted by the configuration script are parsed
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
    previous_args = sys.argv
    sys.argv = [ 'configure_invokeai' ]
    sys.argv.extend(root_dir)
    sys.argv.extend(config)
    if yes_to_all is not None:
        sys.argv.append(yes_to_all)

    import ldm.invoke.configure_invokeai as configure_invokeai
    configure_invokeai.main()
    print('** InvokeAI will now restart')
    sys.argv = previous_args
    main() # would rather do a os.exec(), but doesn't exist?
    sys.exit(0)

def check_internet()->bool:
    '''
    Return true if the internet is reachable.
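The reworked report_model_error() ties back to the Dockerfile change: the INVOKE_MODEL_RECONFIGURE environment variable is split into extra arguments for configure_invokeai, and the interactive prompt is skipped whenever it contains --yes. A sketch of driving that path outside the container, assuming the old scripts/invoke.py entrypoint is run from the repository root:

```bash
# Illustrative only: force a non-interactive model reconfigure on model errors,
# mirroring the ENV set in the runtime image.
export INVOKE_MODEL_RECONFIGURE="--yes --default_only"
python3 scripts/invoke.py --web --host=0.0.0.0
```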