Fix ROCm support in Docker container

Authored by Wilson E. Alvarez on 2023-10-30 11:58:40 -04:00; committed by Kent Keirsey
parent cb6d0c8851
commit 76b3f8956b
5 changed files with 12 additions and 8 deletions

View File

@@ -11,5 +11,5 @@ INVOKEAI_ROOT=
 # HUGGING_FACE_HUB_TOKEN=
 ## optional variables specific to the docker setup.
-# GPU_DRIVER=cuda
-# CONTAINER_UID=1000
+# GPU_DRIVER=cuda # or rocm
+# CONTAINER_UID=1000
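To actually select the ROCm build, the variable has to be uncommented in the env file that Docker Compose reads next to the compose file. A minimal sketch, assuming the runtime file is a copy of this sample saved as docker/.env:

    # docker/.env (assumed to be a copy of .env.sample)
    GPU_DRIVER=rocm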

View File

@@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai
 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.0.1
-ARG TORCHVISION_VERSION=0.15.2
+ARG TORCH_VERSION=2.1.0
+ARG TORCHVISION_VERSION=0.16
 ARG GPU_DRIVER=cuda
 ARG TARGETPLATFORM="linux/amd64"
 # unused but available
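Because the Torch pins are declared as build args rather than hard-coded, they can be overridden at image build time without editing the Dockerfile. An illustrative invocation from the repository root; the image tag and context path are assumptions, not part of this commit:

    docker build -f docker/Dockerfile \
        --build-arg GPU_DRIVER=rocm \
        --build-arg TORCH_VERSION=2.1.0 \
        --build-arg TORCHVISION_VERSION=0.16 \
        -t invokeai:rocm .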
@@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
+        extra_index_url_arg="--index-url https://download.pytorch.org/whl/rocm5.6"; \
     else \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
     fi &&\
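The switch from --extra-index-url to --index-url for the ROCm case matters: with an extra index, pip still consults PyPI and may resolve the default (CUDA-bundled) torch wheel from there, whereas --index-url makes the ROCm wheel index the only source. Outside the image build, the equivalent manual install would look roughly like this, with version specifiers mirroring the pins above:

    pip install "torch~=2.1.0" "torchvision~=0.16" \
        --index-url https://download.pytorch.org/whl/rocm5.6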

View File

@@ -15,6 +15,10 @@ services:
             - driver: nvidia
               count: 1
               capabilities: [gpu]
+    # For AMD support, comment out the deploy section above and uncomment the devices section below:
+    #devices:
+    #  - /dev/kfd:/dev/kfd
+    #  - /dev/dri:/dev/dri
     build:
       context: ..
       dockerfile: docker/Dockerfile
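For reference, with the new comments stripped, the AMD variant of the service passes the ROCm device nodes straight through instead of reserving an NVIDIA GPU. A sketch only; the service name and surrounding keys are assumptions:

    services:
      invokeai:          # service name assumed
        devices:
          - /dev/kfd:/dev/kfd
          - /dev/dri:/dev/dri
        build:
          context: ..
          dockerfile: docker/Dockerfile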

View File

@@ -7,5 +7,5 @@ set -e
 SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1
-docker compose up --build -d
+docker compose up -d
 docker compose logs -f
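Dropping --build means the script now starts the existing image instead of rebuilding it on every run. After changing GPU_DRIVER or the Torch pins, rebuild explicitly from the directory containing docker-compose.yml before starting the service again, for example:

    docker compose build
    docker compose up -d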

View File

@@ -80,8 +80,8 @@ dependencies = [
     "semver~=3.0.1",
     "send2trash",
     "test-tube~=0.7.5",
-    "torch~=2.0.1",
-    "torchvision~=0.15.2",
+    "torch~=2.1.0",
+    "torchvision~=0.16",
     "torchmetrics~=0.11.0",
     "torchsde~=0.2.5",
     "transformers~=4.31.0",