diff --git a/.dockerignore b/.dockerignore index 60195f608a..255335040f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,12 +1,26 @@ * !backend -!configs -!environments-and-requirements !frontend -!installer +!binary_installer !ldm !main.py !scripts !server !static !setup.py +!docker-build +!docs +docker-build/Dockerfile + +# Guard against pulling in any models that might exist in the directory tree +**/*.pt* + +# unignore configs, but only ignore the custom models.yaml, in case it exists +!configs +configs/models.yaml + +# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists +!environment* +environment.yml + +**/__pycache__ diff --git a/.github/workflows/build-cloud-img.yml b/.github/workflows/build-cloud-img.yml new file mode 100644 index 0000000000..a1a2f06a12 --- /dev/null +++ b/.github/workflows/build-cloud-img.yml @@ -0,0 +1,63 @@ +name: Build and push cloud image +on: + workflow_dispatch: + push: + branches: + - main + - development + tags: + - v* + +permissions: + contents: read + packages: write + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + docker: + strategy: + fail-fast: false + matrix: + # only x86_64 for now. aarch64+cuda isn't really a thing yet + arch: + - x86_64 + runs-on: ubuntu-latest + name: ${{ matrix.arch }} + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=ref,event=branch + type=ref,event=tag + type=ref,event=pr + type=sha + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - if: github.event_name != 'pull_request' + name: Docker login + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push cloud image + uses: docker/build-push-action@v3 + with: + context: . 
+ file: docker-build/Dockerfile.cloud + platforms: linux/${{ matrix.arch }} + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/docker-build/Dockerfile.cloud b/docker-build/Dockerfile.cloud new file mode 100644 index 0000000000..db6e1523b3 --- /dev/null +++ b/docker-build/Dockerfile.cloud @@ -0,0 +1,86 @@ +####################### +#### Builder stage #### + +FROM library/ubuntu:22.04 AS builder + +ARG DEBIAN_FRONTEND=noninteractive +RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt update && apt-get install -y \ git \ libglib2.0-0 \ libgl1-mesa-glx \ python3-venv \ python3-pip \ build-essential \ python3-opencv \ libopencv-dev + +# This is needed for patchmatch support +RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\ ln -sf opencv4.pc opencv.pc + +ARG WORKDIR=/invokeai +WORKDIR ${WORKDIR} + +ENV VIRTUAL_ENV=${WORKDIR}/.venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +RUN --mount=type=cache,target=/root/.cache/pip \ python3 -m venv ${VIRTUAL_ENV} &&\ pip install --extra-index-url https://download.pytorch.org/whl/cu116 \ torch==1.12.0+cu116 \ torchvision==0.13.0+cu116 &&\ pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch + +COPY . . +RUN --mount=type=cache,target=/root/.cache/pip \ cp binary_installer/py3.10-linux-x86_64-cuda-reqs.txt requirements.txt && \ pip install -r requirements.txt &&\ pip install -e . 
+ + +####################### +#### Runtime stage #### + +FROM library/ubuntu:22.04 as runtime + +ARG DEBIAN_FRONTEND=noninteractive +ENV PYTHONUNBUFFERED=1 +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update && apt install -y --no-install-recommends \ + git \ + curl \ + ncdu \ + iotop \ + bzip2 \ + libglib2.0-0 \ + libgl1-mesa-glx \ + python3-venv \ + python3-pip \ + build-essential \ + python3-opencv \ + libopencv-dev &&\ + apt-get clean && apt-get autoclean + +ARG WORKDIR=/invokeai +WORKDIR ${WORKDIR} + +ENV INVOKEAI_ROOT=/mnt/invokeai +ENV VIRTUAL_ENV=${WORKDIR}/.venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +COPY --from=builder ${WORKDIR} ${WORKDIR} +COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig + +# build patchmatch +RUN python -c "from patchmatch import patch_match" + +## workaround for non-existent initfile when runtime directory is mounted; see #1613 +RUN touch /root/.invokeai + +ENTRYPOINT ["bash"] + +CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"] diff --git a/docker-build/Makefile b/docker-build/Makefile new file mode 100644 index 0000000000..963caee9e4 --- /dev/null +++ b/docker-build/Makefile @@ -0,0 +1,44 @@ +# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted +INVOKEAI_ROOT=/mnt/invokeai +# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container +HOST_MOUNT_PATH=${HOME}/invokeai + +IMAGE=local/invokeai:latest + +USER=$(shell id -u) +GROUP=$(shell id -g) + +# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host. +# This is consistent with the expected non-Docker behaviour. +# Contents can be moved to a persistent storage and used to prime the cache on another host. + +build: + DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud .. 
+ +configure: + docker run --rm -it --runtime=nvidia --gpus=all \ + -v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \ + -e INVOKEAI_ROOT=${INVOKEAI_ROOT} \ + ${IMAGE} -c "python scripts/configure_invokeai.py" + +# Run the container with the runtime dir mounted and the web server exposed on port 9090 +web: + docker run --rm -it --runtime=nvidia --gpus=all \ + -v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \ + -e INVOKEAI_ROOT=${INVOKEAI_ROOT} \ + -p 9090:9090 \ + ${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0" + +# Run the cli with the runtime dir mounted +cli: + docker run --rm -it --runtime=nvidia --gpus=all \ + -v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \ + -e INVOKEAI_ROOT=${INVOKEAI_ROOT} \ + ${IMAGE} -c "python scripts/invoke.py" + +# Run the container with the runtime dir mounted and open a bash shell +shell: + docker run --rm -it --runtime=nvidia --gpus=all \ + -v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} -- + +.PHONY: build configure web cli shell diff --git a/docs/installation/INSTALL_DOCKER.md b/docs/installation/INSTALL_DOCKER.md index 934ebc7e88..704662a06f 100644 --- a/docs/installation/INSTALL_DOCKER.md +++ b/docs/installation/INSTALL_DOCKER.md @@ -6,7 +6,7 @@ title: Docker !!! warning "For end users" - We highly recommend to Install InvokeAI locally using [these instructions](index.md)" + We highly recommend to Install InvokeAI locally using [these instructions](index.md) !!! tip "For developers" @@ -16,6 +16,10 @@ title: Docker For general use, install locally to leverage your machine's GPU. +!!! tip "For running on a cloud instance/service" + + Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below + ## Why containers? They provide a flexible, reliable way to build and deploy InvokeAI. You'll also @@ -36,7 +40,7 @@ development purposes it's fine. 
Once you're done with development tasks on your laptop you can build for the target platform and architecture and deploy to another environment with NVIDIA GPUs on-premises or in the cloud. -## Installation on a Linux container +## Installation in a Linux container (desktop) ### Prerequisites @@ -117,12 +121,91 @@ also do so. ./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10 ``` - This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps. + This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps. Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments) --- +## Running InvokeAI in the cloud with Docker + +We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested). + +An advantage of this method is that it does not need any local setup or additional dependencies. + +See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content. + +### Prerequisites + +- a `docker` runtime +- `make` (optional but helps for convenience) +- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation + + Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly. + +### Building and running the image locally + +1. Clone this repo and `cd docker-build` +1. `make build` - this will build the image. (This does *not* require a GPU-capable system). +1. 
_(skip this step if you already have a complete InvokeAI runtime directory)_ + - `make configure` (This does *not* require a GPU-capable system) + - this will create a local cache of models and configs (a.k.a the _runtime dir_) + - enter your Huggingface token when prompted +1. `make web` +1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi! + +To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`. + +#### Building and running without `make` + +(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary). + +!!! example "Build the image and configure the runtime directory" + ```Shell + cd docker-build + + DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud .. + + docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py" + ``` + +!!! example "Run the web server" + ```Shell + docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest + ``` + + Access the Web UI at http://localhost:9090 + +!!! example "Run the InvokeAI interactive CLI" + ``` + docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py" + ``` + +### Running the image in the cloud + +This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM): + +1. build this image either in the cloud (you'll need to pull the repo), or locally +1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub) +1. `docker pull` it on your cloud instance +1. configure the runtime directory as per above example, using `docker run ... 
configure_invokeai.py` script +1. use either one of the `docker run` commands above, substituting the image name for your own image. + +To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build. + +The template's `README` provides ample detail, but at a high level, the process is as follows: + +1. create a pod using this Docker image +1. ensure the pod has an `INVOKEAI_ROOT=` environment variable, and that it corresponds to the path to your pod's persistent volume mount +1. Run the pod with `sleep infinity` as the Docker command +1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py` script +1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0` +1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi! + +Running on other cloud providers such as Vast.ai will likely work in a similar fashion. + +--- + !!! warning "Deprecated" From here on you will find the the previous Docker-Docs, which will still @@ -135,12 +218,12 @@ also do so. If you're on a **Linux container** the `invoke` script is **automatically started** and the output dir set to the Docker volume you created earlier. -If you're **directly on macOS follow these startup instructions**. +If you're **directly on macOS follow these startup instructions**. With the Conda environment activated (`conda activate ldm`), run the interactive interface that combines the functionality of the original scripts `txt2img` and -`img2img`: +`img2img`: Use the more accurate but VRAM-intensive full precision math because -half-precision requires autocast and won't work. +half-precision requires autocast and won't work. By default the images are saved in `outputs/img-samples/`. 
```Shell @@ -157,8 +240,8 @@ invoke> q ### Text to Image For quick (but bad) image results test with 5 steps (default 50) and 1 sample -image. This will let you know that everything is set up correctly. -Then increase steps to 100 or more for good (but slower) results. +image. This will let you know that everything is set up correctly. +Then increase steps to 100 or more for good (but slower) results. The prompt can be in quotes or not. ```Shell @@ -172,8 +255,8 @@ You'll need to experiment to see if face restoration is making it better or worse for your specific prompt. If you're on a container the output is set to the Docker volume. You can copy it -wherever you want. -You can download it from the Docker Desktop app, Volumes, my-vol, data. +wherever you want. +You can download it from the Docker Desktop app, Volumes, my-vol, data. Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand `*.png` so you'll need to specify the image file name.