Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
update dockerfile (#1551)
* update dockerfile
* remove non-existing file from .dockerignore
* remove bloat and an unnecessary step; also use --no-cache-dir for pip install (image is now close to 2GB)
* make Dockerfile a variable
* set base image to `ubuntu:22.10`
* add build-essential
* link outputs folder for persistence
* update tag variable
* update docs
* fix non-customizable build args, add requirements output
Parent: 08ef4d62e9
Commit: 3f6d0fb7da
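For reference, a minimal sketch of invoking the updated build script with the variables introduced here (variable names come from the env.sh and build-script changes below; the token value is a placeholder, and the script path assumes the repository's docker-build layout):

```bash
# sketch: build with this commit's new variables; HUGGINGFACE_TOKEN is required,
# the other values shown are the defaults introduced in this commit
HUGGINGFACE_TOKEN="<your-huggingface-token>" \
PIP_REQUIREMENTS="requirements-lin-cuda.txt" \
INVOKE_DOCKERFILE="docker-build/Dockerfile" \
  ./docker-build/build.sh
```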
.dockerignore

@@ -1,3 +1,12 @@
 *
-!environment*.yml
-!docker-build
+!backend
+!configs
+!environments-and-requirements
+!frontend
+!installer
+!ldm
+!main.py
+!scripts
+!server
+!static
+!setup.py
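The ignore file now follows a strict allowlist: `*` excludes everything, and each `!` entry re-includes only what the image build needs. One way to verify what actually reaches the build context is a throwaway build that lists it (a sketch; assumes Docker with BuildKit's support for a Dockerfile on stdin):

```bash
# sketch: list the files admitted by .dockerignore into the build context
printf 'FROM busybox\nCOPY . /ctx\nRUN find /ctx -maxdepth 1\n' \
  | docker build --no-cache --progress=plain -f - .
```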

.github/workflows/build-container.yml (19 changes, vendored)

@@ -6,6 +6,7 @@ on:
     branches:
       - 'main'
       - 'development'
+      - 'update-dockerfile'
 
 jobs:
   docker:
@@ -15,13 +16,11 @@ jobs:
         arch:
           - x86_64
           - aarch64
-        include:
-          - arch: x86_64
-            conda-env-file: environment-lin-cuda.yml
-          - arch: aarch64
-            conda-env-file: environment-lin-aarch64.yml
+        pip-requirements:
+          - requirements-lin-amd.txt
+          - requirements-lin-cuda.txt
     runs-on: ubuntu-latest
-    name: ${{ matrix.arch }}
+    name: ${{ matrix.pip-requirements }} ${{ matrix.arch }}
     steps:
       - name: prepare docker-tag
         env:
@@ -40,9 +39,5 @@ jobs:
           file: docker-build/Dockerfile
           platforms: Linux/${{ matrix.arch }}
           push: false
-          tags: ${{ env.dockertag }}:${{ matrix.arch }}
-          build-args: |
-            conda_env_file=${{ matrix.conda-env-file }}
-            conda_version=py39_4.12.0-Linux-${{ matrix.arch }}
-            invokeai_git=${{ github.repository }}
-            invokeai_branch=${{ github.ref_name }}
+          tags: ${{ env.dockertag }}:${{ matrix.pip-requirements }}-${{ matrix.arch }}
+          build-args: pip_requirements=${{ matrix.pip-requirements }}

docker-build/Dockerfile

@@ -1,34 +1,13 @@
-FROM ubuntu AS get_miniconda
-
-SHELL ["/bin/bash", "-c"]
-
-# install wget
-RUN apt-get update \
-  && apt-get install -y \
-    wget \
-  && apt-get clean \
-  && rm -rf /var/lib/apt/lists/*
-
-# download and install miniconda
-ARG conda_version=py39_4.12.0-Linux-x86_64
-ARG conda_prefix=/opt/conda
-RUN wget --progress=dot:giga -O /miniconda.sh \
-  https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
-  && bash /miniconda.sh -b -p ${conda_prefix} \
-  && rm -f /miniconda.sh
-
-FROM ubuntu AS invokeai
+FROM ubuntu:22.10
 
 # use bash
 SHELL [ "/bin/bash", "-c" ]
 
-# clean bashrc
-RUN echo "" > ~/.bashrc
-
 # Install necesarry packages
 RUN apt-get update \
   && apt-get install -y \
     --no-install-recommends \
+    build-essential \
     gcc \
     git \
     libgl1-mesa-glx \
@@ -39,41 +18,24 @@ RUN apt-get update \
   && apt-get clean \
   && rm -rf /var/lib/apt/lists/*
 
-# clone repository, create models.yaml and create symlinks
-ARG invokeai_git=invoke-ai/InvokeAI
-ARG invokeai_branch=main
-ARG project_name=invokeai
-ARG conda_env_file=environment-lin-cuda.yml
-RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
-  && ln -sf \
-    "/${project_name}/environments-and-requirements/${conda_env_file}" \
-    "/${project_name}/environment.yml" \
-  && ln -sf \
-    /data/outputs/ \
-    "/${project_name}/outputs"
+ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
+ARG PROJECT_NAME=invokeai
+ARG INVOKEAI_ROOT=/data
+ENV INVOKEAI_ROOT=${INVOKEAI_ROOT}
 
-# set workdir and copy models.yaml
-WORKDIR "/${project_name}"
-COPY docker-build/models.yaml configs/models.yaml
+# set workdir and copy sources
+WORKDIR /${PROJECT_NAME}
+COPY . .
 
-# install conda env and preload models
-ARG conda_prefix=/opt/conda
-COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
-RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
-  && conda init bash \
-  && source ~/.bashrc \
-  && conda env create \
-    --name "${project_name}" \
-  && rm -Rf ~/.cache \
-  && conda clean -afy \
-  && echo "conda activate ${project_name}" >> ~/.bashrc
+# install requirements and link outputs folder
+RUN cp \
+  ./environments-and-requirements/${PIP_REQUIREMENTS} \
+  ${PIP_REQUIREMENTS} \
+  && pip install \
+    --no-cache-dir \
+    -r ${PIP_REQUIREMENTS} \
+  && ln -sf /data/outputs /${PROJECT_NAME}/outputs
 
-RUN source ~/.bashrc \
-  && python scripts/preload_models.py \
-    --no-interactive
-
-# Copy entrypoint and set env
-ENV CONDA_PREFIX="${conda_prefix}"
-ENV PROJECT_NAME="${project_name}"
-COPY docker-build/entrypoint.sh /
-ENTRYPOINT [ "/entrypoint.sh" ]
+# set Entrypoint and default CMD
+ENTRYPOINT [ "python3" ]
+CMD [ "scripts/invoke.py", "--web", "--host", "0.0.0.0" ]
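Since the entrypoint is now plain `python3` with a default CMD that starts the web server, running the finished image no longer needs the removed `entrypoint.sh`. A sketch of a manual run (tag and volume names follow the env.sh defaults further down; port 9090 is assumed as InvokeAI's default web port):

```bash
# sketch: run the image built from the new Dockerfile
# tag/volume names per env.sh defaults; port 9090 assumed (InvokeAI web default)
docker run \
  --rm \
  --mount="source=invokeai_data,target=/data" \
  --publish=9090:9090 \
  invokeai:x86_64
```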

docker-build/build.sh

@@ -6,23 +6,17 @@ set -e
 source ./docker-build/env.sh || echo "please run from repository root" || exit 1
 
-invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
-invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
-invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
-invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
-invokeai_branch=${INVOKEAI_BRANCH:-main}
 huggingface_token=${HUGGINGFACE_TOKEN?}
+pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
+dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
 
 # print the settings
 echo "You are using these values:"
+echo -e "Dockerfile:\t\t ${dockerfile}"
+echo -e "requirements:\t\t ${pip_requirements}"
 echo -e "project_name:\t\t ${project_name}"
 echo -e "volumename:\t\t ${volumename}"
 echo -e "arch:\t\t\t ${arch}"
 echo -e "platform:\t\t ${platform}"
-echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
-echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
-echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
-echo -e "invokeai_git:\t\t ${invokeai_git}"
 echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
@@ -35,50 +29,33 @@ _runAlpine() {
     alpine "$@"
 }
 
-_copyCheckpoints() {
-  echo "creating subfolders for models and outputs"
-  _runAlpine mkdir models
-  _runAlpine mkdir outputs
-  echo "downloading v1-5-pruned-emaonly.ckpt"
-  _runAlpine wget \
-    --header="Authorization: Bearer ${huggingface_token}" \
-    -O models/v1-5-pruned-emaonly.ckpt \
-    https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
-  echo "done"
-}
-
-_checkVolumeContent() {
-  _runAlpine ls -lhA /data/models
-}
-
-_getModelMd5s() {
-  _runAlpine \
-    alpine sh -c "md5sum /data/models/*.ckpt"
-}
-
 if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
   echo "Volume already exists"
-  if [[ -z "$(_checkVolumeContent)" ]]; then
-    echo "looks empty, copying checkpoint"
-    _copyCheckpoints
-  fi
-  echo "Models in ${volumename}:"
-  _checkVolumeContent
 else
   echo -n "createing docker volume "
   docker volume create "${volumename}"
-  _copyCheckpoints
 fi
 
 # Build Container
 docker build \
   --platform="${platform}" \
-  --tag "${invokeai_tag}" \
-  --build-arg project_name="${project_name}" \
-  --build-arg conda_version="${invokeai_conda_version}" \
-  --build-arg conda_prefix="${invokeai_conda_prefix}" \
-  --build-arg conda_env_file="${invokeai_conda_env_file}" \
-  --build-arg invokeai_git="${invokeai_git}" \
-  --build-arg invokeai_branch="${invokeai_branch}" \
-  --file ./docker-build/Dockerfile \
+  --tag="${invokeai_tag}" \
+  --build-arg="PROJECT_NAME=${project_name}" \
+  --build-arg="PIP_REQUIREMENTS=${pip_requirements}" \
+  --file="${dockerfile}" \
   .
+
+docker run \
+  --rm \
+  --platform="$platform" \
+  --name="$project_name" \
+  --hostname="$project_name" \
+  --mount="source=$volumename,target=/data" \
+  --mount="type=bind,source=$HOME/.huggingface,target=/root/.huggingface" \
+  --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
+  "${invokeai_tag}" \
+  scripts/configure_invokeai.py --yes
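With `_checkVolumeContent` and the other helpers removed along with the checkpoint-copying logic, an equivalent one-off inspection of the models volume can still be done directly (a sketch; `invokeai_data` is the default volume name from env.sh):

```bash
# sketch: stand-in for the removed _checkVolumeContent helper
docker run --rm \
  --mount="source=invokeai_data,target=/data" \
  alpine ls -lhA /data/models
```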

docker-build/entrypoint.sh (deleted)

@@ -1,8 +0,0 @@
-#!/bin/bash
-set -e
-
-source "${CONDA_PREFIX}/etc/profile.d/conda.sh"
-conda activate "${PROJECT_NAME}"
-
-python scripts/invoke.py \
-  ${@:---web --host=0.0.0.0}
docker-build/env.sh

@@ -4,7 +4,7 @@ project_name=${PROJECT_NAME:-invokeai}
 volumename=${VOLUMENAME:-${project_name}_data}
 arch=${ARCH:-x86_64}
 platform=${PLATFORM:-Linux/${arch}}
-invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}
+invokeai_tag=${INVOKEAI_TAG:-${project_name}:${arch}}
 
 export project_name
 export volumename
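The only change here swaps the separator in the default tag, turning the flat name `invokeai-x86_64` into a proper `repository:tag` pair. A quick way to confirm the resolved value (assuming the defaults above, run from the repository root):

```bash
# sketch: check the resolved default tag after this change
source ./docker-build/env.sh
echo "${invokeai_tag}"   # prints: invokeai:x86_64
```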

docker-build/models.yaml (deleted)

@@ -1,8 +0,0 @@
-stable-diffusion-1.5:
-  description: The newest Stable Diffusion version 1.5 weight file (4.27 GB)
-  weights: /data/models/v1-5-pruned-emaonly.ckpt
-  config: ./configs/stable-diffusion/v1-inference.yaml
-  width: 512
-  height: 512
-  vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
-  default: true

docs/installation/INSTALL_DOCKER.md

@@ -72,14 +72,19 @@ created in the last step.
 
 Some Suggestions of variables you may want to change besides the Token:
 
-| Environment-Variable      | Default value                 | Description                                                                   |
-| ------------------------- | ----------------------------- | ----------------------------------------------------------------------------- |
-| `HUGGINGFACE_TOKEN`       | No default, but **required**! | This is the only **required** variable, without you can't get the checkpoint  |
-| `ARCH`                    | x86_64                        | if you are using a ARM based CPU                                              |
-| `INVOKEAI_TAG`            | invokeai-x86_64               | the Container Repository / Tag which will be used                             |
-| `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml      | since environment.yml wouldn't work with aarch                                |
-| `INVOKEAI_GIT`            | invoke-ai/InvokeAI            | the repository to use                                                         |
-| `INVOKEAI_BRANCH`         | main                          | the branch to checkout                                                        |
+<figure markdown>
+
+| Environment-Variable | Default value                 | Description                                                                                    |
+| -------------------- | ----------------------------- | ---------------------------------------------------------------------------------------------- |
+| `HUGGINGFACE_TOKEN`  | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models   |
+| `PROJECT_NAME`       | `invokeai`                    | affects the project folder, tag- and volume name                                               |
+| `VOLUMENAME`         | `${PROJECT_NAME}_data`        | affects the project folder, tag- and volume name                                               |
+| `ARCH`               | `x86_64`                      | can be changed to f.e. aarch64 if you are using a ARM based CPU                                |
+| `INVOKEAI_TAG`       | `${PROJECT_NAME}:${ARCH}`     | the Container Repository / Tag which will be used                                              |
+| `PIP_REQUIREMENTS`   | `requirements-lin-cuda.txt`   | the requirements file to use (from `environments-and-requirements`)                            |
+| `INVOKE_DOCKERFILE`  | `docker-build/Dockerfile`     | the Dockerfile which should be built, handy for development                                    |
+
+</figure>
 
 #### Build the Image
 
@@ -109,12 +114,12 @@ also do so.
 !!! example ""
 
     ```bash
-    ./docker-build/run.sh --from_file tests/validate_pr_prompt.txt
+    ./docker-build/run.sh scripts/invoke.py
     ```
 
-The output folder is located on the volume which is also used to store the model.
+This would start the CLI instead of the default command that starts the webserver.
 
-Find out more about available CLI-Parameters at [features/CLI.md](../features/CLI.md/#arguments)
+Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)
 
 ---
 
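Building on the docs example above, and assuming `run.sh` forwards its arguments to the container's `python3` entrypoint (which the new ENTRYPOINT/CMD split suggests), other invocations might look like:

```bash
# sketch: assumes run.sh passes arguments through to the python3 entrypoint
./docker-build/run.sh scripts/invoke.py --web --host=0.0.0.0     # webserver, explicit
./docker-build/run.sh scripts/invoke.py --from_file tests/validate_pr_prompt.txt
```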