mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
ad7917c7aa
* add docker build optimized for size; do not copy models to image — useful for cloud deployments. attempts to utilize docker layer caching as effectively as possible. also some quick tools to help with building * add workflow to build cloud img in ci * push cloud image in addition to building * (ci) also tag docker images with git SHA * (docker) rework Makefile for easy cache population and local use * support the new conda-less install; further optimize docker build * (ci) clean up the build-cloud-img action * improve the Makefile for local use * move execution of invoke script from entrypoint to cmd, allows overriding the cmd if needed (e.g. in Runpod) * remove unnecessary copyright statements * (docs) add a section on running InvokeAI in the cloud using Docker * (docker) add patchmatch to the cloud image; improve build caching; simplify Makefile * (docker) fix pip requirements path to use binary_installer directory
45 lines
1.6 KiB
Makefile
45 lines
1.6 KiB
Makefile
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai

# Image tag used by all targets below; override on the command line if needed,
# e.g. `make web IMAGE=local/invokeai:dev`
IMAGE=local/invokeai:latest

# Host uid/gid, captured once at parse time.
# FIX: use := (simple expansion) so `id` runs once instead of on every reference.
# NOTE(review): neither variable is referenced elsewhere in this Makefile —
# presumably kept for docker `--user` overrides; confirm before removing.
USER:=$(shell id -u)
GROUP:=$(shell id -g)

# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.
# Build the cloud-optimized image from Dockerfile.cloud (BuildKit enables
# efficient layer caching). Build context is the repository root (..).
# FIX: tag with ${IMAGE} instead of a hard-coded `local/invokeai:latest`,
# so overriding IMAGE affects the build target consistently with run targets.
build:
	DOCKER_BUILDKIT=1 docker build -t ${IMAGE} -f Dockerfile.cloud ..
# One-time setup: run the configure script inside the container, writing
# models/config into the host runtime dir mounted at ${INVOKEAI_ROOT}.
configure:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/configure_invokeai.py"
# Run the container with the runtime dir mounted and the web server exposed on port 9090
web:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-p 9090:9090 \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
# Run the interactive CLI with the runtime dir mounted
cli:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		${IMAGE} -c "python scripts/invoke.py"
# Run the container with the runtime dir mounted and open a bash shell.
# FIX: also export INVOKEAI_ROOT into the container environment, consistent
# with the configure/web/cli targets, so scripts invoked from the shell can
# locate the runtime dir.
shell:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
		${IMAGE} --
# Command targets, not files: declare phony so a file with one of these
# names never makes the target appear up to date.
.PHONY: build configure web cli shell
|