# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr

version: '3.8'

services:
  invokeai:
    image: "local/invokeai:latest"

    # edit below to run on a container runtime other than nvidia-container-runtime.
    # not yet tested with rocm/AMD GPUs
    # Comment out the "deploy" section to run on CPU only
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

    build:
      context: ..
      dockerfile: docker/Dockerfile

    # variables without a default will automatically inherit from the host environment
    environment:
      - INVOKEAI_ROOT
      - HF_HOME

    # Create a .env file in the same directory as this docker-compose.yml file
    # and populate it with environment variables. See .env.sample
    env_file:
      - .env

    ports:
      - "${INVOKEAI_PORT:-9090}:9090"
    volumes:
      - ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
      - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
      # - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
      # - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}

    # keep a TTY and stdin open so the container behaves well for interactive use
    tty: true
    stdin_open: true

    # # Example of running alternative commands/scripts in the container
    # command:
    #   - bash
    #   - -c
    #   - |
    #     invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
    #     invokeai-nodes-web --host 0.0.0.0