mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
(docker) rewrite container implementation with docker-compose support
- rewrite Dockerfile - add a stage to build the UI - add docker-compose.yml - add docker-entrypoint.sh such that any command may be used at runtime - docker-compose adds .env support - add a sample .env file
This commit is contained in:
parent
4a8172bcd0
commit
f3b45d0ad9
@ -1,25 +1,8 @@
|
|||||||
# use this file as a whitelist
|
|
||||||
*
|
*
|
||||||
!invokeai
|
!invokeai
|
||||||
!ldm
|
!ldm
|
||||||
!pyproject.toml
|
!pyproject.toml
|
||||||
|
!docker/docker-entrypoint.sh
|
||||||
|
!LICENSE
|
||||||
|
|
||||||
# ignore frontend/web but whitelist dist
|
**/__pycache__
|
||||||
invokeai/frontend/web/
|
|
||||||
!invokeai/frontend/web/dist/
|
|
||||||
|
|
||||||
# ignore invokeai/assets but whitelist invokeai/assets/web
|
|
||||||
invokeai/assets/
|
|
||||||
!invokeai/assets/web/
|
|
||||||
|
|
||||||
# Guard against pulling in any models that might exist in the directory tree
|
|
||||||
**/*.pt*
|
|
||||||
**/*.ckpt
|
|
||||||
|
|
||||||
# Byte-compiled / optimized / DLL files
|
|
||||||
**/__pycache__/
|
|
||||||
**/*.py[cod]
|
|
||||||
|
|
||||||
# Distribution / packaging
|
|
||||||
**/*.egg-info/
|
|
||||||
**/*.egg
|
|
||||||
|
13
docker/.env.sample
Normal file
13
docker/.env.sample
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
## Make a copy of this file named `.env` and fill in the values below.
|
||||||
|
## Any environment variables supported by InvokeAI can be specified here.
|
||||||
|
|
||||||
|
# INVOKEAI_ROOT is the path on the local filesystem where InvokeAI will store data.
|
||||||
|
# Outputs will also be stored here by default.
|
||||||
|
# This **must** be an absolute path.
|
||||||
|
INVOKEAI_ROOT=
|
||||||
|
|
||||||
|
HUGGINGFACE_TOKEN=
|
||||||
|
|
||||||
|
## optional variables specific to the docker setup
|
||||||
|
# GPU_DRIVER=cuda
|
||||||
|
# CONTAINER_UID=1000
|
@ -1,107 +1,122 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1.4
|
||||||
|
|
||||||
ARG PYTHON_VERSION=3.9
|
## Builder stage
|
||||||
##################
|
|
||||||
## base image ##
|
|
||||||
##################
|
|
||||||
FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base
|
|
||||||
|
|
||||||
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
|
FROM ubuntu:22.04 AS builder
|
||||||
|
|
||||||
# Prepare apt for buildkit cache
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
|
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
||||||
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
|
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
|
apt update && apt-get install -y \
|
||||||
|
git \
|
||||||
|
python3-venv \
|
||||||
|
python3-pip \
|
||||||
|
build-essential
|
||||||
|
|
||||||
# Install dependencies
|
ENV INVOKEAI_SRC=/opt/invokeai
|
||||||
RUN \
|
ENV VIRTUAL_ENV=/opt/venv/invokeai
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libgl1-mesa-glx=20.3.* \
|
|
||||||
libglib2.0-0=2.66.* \
|
|
||||||
libopencv-dev=4.5.*
|
|
||||||
|
|
||||||
# Set working directory and env
|
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||||
ARG APPDIR=/usr/src
|
ARG TORCH_VERSION=2.0.1
|
||||||
ARG APPNAME=InvokeAI
|
ARG TORCHVISION_VERSION=0.15.2
|
||||||
WORKDIR ${APPDIR}
|
ARG GPU_DRIVER=cuda
|
||||||
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
|
ARG TARGETPLATFORM
|
||||||
# Keeps Python from generating .pyc files in the container
|
# unused but available
|
||||||
ENV PYTHONDONTWRITEBYTECODE 1
|
ARG BUILDPLATFORM
|
||||||
# Turns off buffering for easier container logging
|
|
||||||
ENV PYTHONUNBUFFERED 1
|
|
||||||
# Don't fall back to legacy build system
|
|
||||||
ENV PIP_USE_PEP517=1
|
|
||||||
|
|
||||||
#######################
|
WORKDIR ${INVOKEAI_SRC}
|
||||||
## build pyproject ##
|
|
||||||
#######################
|
|
||||||
FROM python-base AS pyproject-builder
|
|
||||||
|
|
||||||
# Install build dependencies
|
# Install pytorch before all other pip packages
|
||||||
RUN \
|
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
# x86_64/CUDA is default
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||||
apt-get update \
|
python3 -m venv ${VIRTUAL_ENV} &&\
|
||||||
&& apt-get install -y \
|
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
|
||||||
--no-install-recommends \
|
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
|
||||||
build-essential=12.9 \
|
elif [ "$GPU_DRIVER" = "rocm" ]; then \
|
||||||
gcc=4:10.2.* \
|
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.2"; \
|
||||||
python3-dev=3.9.*
|
else \
|
||||||
|
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu117"; \
|
||||||
|
fi &&\
|
||||||
|
pip install $extra_index_url_arg \
|
||||||
|
torch==$TORCH_VERSION \
|
||||||
|
torchvision==$TORCHVISION_VERSION
|
||||||
|
|
||||||
# Prepare pip for buildkit cache
|
# Install the local package.
|
||||||
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
|
# Editable mode helps use the same image for development:
|
||||||
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
|
# the local working copy can be bind-mounted into the image
|
||||||
RUN mkdir -p ${PIP_CACHE_DIR}
|
# at path defined by ${INVOKEAI_SRC}
|
||||||
|
COPY invokeai ./invokeai
|
||||||
|
COPY pyproject.toml ./
|
||||||
|
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||||
|
# xformers + triton fails to install on arm64
|
||||||
|
if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||||
|
pip install -e ".[xformers]"; \
|
||||||
|
else \
|
||||||
|
pip install -e "."; \
|
||||||
|
fi
|
||||||
|
|
||||||
# Create virtual environment
|
# #### Build the Web UI ------------------------------------
|
||||||
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
|
||||||
python3 -m venv "${APPNAME}" \
|
|
||||||
--upgrade-deps
|
|
||||||
|
|
||||||
# Install requirements
|
FROM node:18 as web-builder
|
||||||
COPY --link pyproject.toml .
|
WORKDIR /build
|
||||||
COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/
|
COPY invokeai/frontend/web/ ./
|
||||||
ARG PIP_EXTRA_INDEX_URL
|
RUN --mount=type=cache,target=node_modules \
|
||||||
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
|
npm install --include dev
|
||||||
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
RUN yarn vite build
|
||||||
"${APPNAME}"/bin/pip install .
|
|
||||||
|
|
||||||
# Install pyproject.toml
|
|
||||||
COPY --link . .
|
|
||||||
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
|
||||||
"${APPNAME}/bin/pip" install .
|
|
||||||
|
|
||||||
# Build patchmatch
|
#### Runtime stage ---------------------------------------
|
||||||
|
|
||||||
|
FROM library/ubuntu:22.04 as runtime
|
||||||
|
|
||||||
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
|
ENV PYTHONUNBUFFERED=1
|
||||||
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
|
|
||||||
|
RUN apt update && apt install -y --no-install-recommends \
|
||||||
|
git \
|
||||||
|
curl \
|
||||||
|
vim \
|
||||||
|
tmux \
|
||||||
|
ncdu \
|
||||||
|
iotop \
|
||||||
|
bzip2 \
|
||||||
|
gosu \
|
||||||
|
libglib2.0-0 \
|
||||||
|
libgl1-mesa-glx \
|
||||||
|
python3-venv \
|
||||||
|
python3-pip \
|
||||||
|
build-essential \
|
||||||
|
libopencv-dev &&\
|
||||||
|
apt-get clean && apt-get autoclean
|
||||||
|
|
||||||
|
# globally add magic-wormhole
|
||||||
|
# for ease of transferring data to and from the container
|
||||||
|
# when running in sandboxed cloud environments; e.g. Runpod etc.
|
||||||
|
RUN pip install magic-wormhole
|
||||||
|
|
||||||
|
ENV INVOKEAI_SRC=/opt/invokeai
|
||||||
|
ENV VIRTUAL_ENV=/opt/venv/invokeai
|
||||||
|
ENV INVOKEAI_ROOT=/invokeai
|
||||||
|
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
|
||||||
|
|
||||||
|
# --link requires buildkit w/ dockerfile syntax 1.4
|
||||||
|
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
|
||||||
|
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
|
||||||
|
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
|
||||||
|
|
||||||
|
WORKDIR ${INVOKEAI_SRC}
|
||||||
|
|
||||||
|
# build patchmatch
|
||||||
|
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
|
||||||
RUN python3 -c "from patchmatch import patch_match"
|
RUN python3 -c "from patchmatch import patch_match"
|
||||||
|
|
||||||
#####################
|
# Create unprivileged user and make the local dir
|
||||||
## runtime image ##
|
RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke
|
||||||
#####################
|
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT}
|
||||||
FROM python-base AS runtime
|
|
||||||
|
|
||||||
# Create a new user
|
COPY docker/docker-entrypoint.sh ./
|
||||||
ARG UNAME=appuser
|
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
|
||||||
RUN useradd \
|
CMD ["invokeai-web", "--host", "0.0.0.0"]
|
||||||
--no-log-init \
|
|
||||||
-m \
|
|
||||||
-U \
|
|
||||||
"${UNAME}"
|
|
||||||
|
|
||||||
# Create volume directory
|
|
||||||
ARG VOLUME_DIR=/data
|
|
||||||
RUN mkdir -p "${VOLUME_DIR}" \
|
|
||||||
&& chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"
|
|
||||||
|
|
||||||
# Setup runtime environment
|
|
||||||
USER ${UNAME}:${UNAME}
|
|
||||||
COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
|
|
||||||
ENV INVOKEAI_ROOT ${VOLUME_DIR}
|
|
||||||
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
|
|
||||||
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
|
|
||||||
EXPOSE 9090
|
|
||||||
ENTRYPOINT [ "invokeai" ]
|
|
||||||
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
|
|
||||||
VOLUME [ "${VOLUME_DIR}" ]
|
|
||||||
|
99
docker/README.md
Normal file
99
docker/README.md
Normal file
@ -0,0 +1,99 @@
|
|||||||
|
# InvokeAI Containerized
|
||||||
|
|
||||||
|
All commands are to be run from the `docker` directory: `cd docker`
|
||||||
|
|
||||||
|
Linux
|
||||||
|
|
||||||
|
1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`)
|
||||||
|
2. Install `docker-compose`
|
||||||
|
3. Ensure docker daemon is able to access the GPU.
|
||||||
|
|
||||||
|
macOS
|
||||||
|
|
||||||
|
1. Ensure Docker has at least 16GB RAM
|
||||||
|
2. Enable VirtioFS for file sharing
|
||||||
|
3. Enable `docker-compose` V2 support
|
||||||
|
|
||||||
|
This is done via Docker Desktop preferences
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy .env.sample .env` (Windows)). Make changes as necessary.
|
||||||
|
2. `docker-compose up`
|
||||||
|
|
||||||
|
The image will be built automatically if needed.
|
||||||
|
|
||||||
|
The runtime directory (holding models and outputs) will be created in your home directory, under `~/invokeai`, populated with necessary content (you will be asked a couple of questions during setup)
|
||||||
|
|
||||||
|
### Use a GPU
|
||||||
|
|
||||||
|
- Linux is *recommended* for GPU support in Docker.
|
||||||
|
- WSL2 is *required* for Windows.
|
||||||
|
- only `x86_64` architecture is supported.
|
||||||
|
|
||||||
|
The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.
|
||||||
|
|
||||||
|
## Customize
|
||||||
|
|
||||||
|
Check the `.env` file. It contains environment variables for running in Docker. Fill it in with your own values. Next time you run `docker-compose up`, your custom values will be used.
|
||||||
|
|
||||||
|
You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```
|
||||||
|
LOCAL_ROOT_DIR=/Volumes/HugeDrive/invokeai
|
||||||
|
HUGGINGFACE_TOKEN=the_actual_token
|
||||||
|
CONTAINER_UID=1000
|
||||||
|
GPU_DRIVER=cuda
|
||||||
|
```
|
||||||
|
|
||||||
|
## Moar Customize!
|
||||||
|
|
||||||
|
See the `docker-compose.yml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below.
|
||||||
|
|
||||||
|
|
||||||
|
#### Turn off the NSFW checker
|
||||||
|
|
||||||
|
```
|
||||||
|
command:
|
||||||
|
- invokeai
|
||||||
|
- --no-nsfw_check
|
||||||
|
- --web
|
||||||
|
- --host 0.0.0.0
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Reconfigure the runtime directory
|
||||||
|
|
||||||
|
Can be used to download additional models from the supported model list
|
||||||
|
|
||||||
|
In conjunction with `LOCAL_ROOT_DIR`, it can also be used to create a brand-new runtime directory.
|
||||||
|
|
||||||
|
```
|
||||||
|
command:
|
||||||
|
- invokeai-configure
|
||||||
|
- --yes
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
#### Run in CLI mode
|
||||||
|
|
||||||
|
This container starts InvokeAI in web mode by default.
|
||||||
|
|
||||||
|
Override the `command` and run `docker compose`:
|
||||||
|
|
||||||
|
```
|
||||||
|
command:
|
||||||
|
- invoke
|
||||||
|
```
|
||||||
|
|
||||||
|
Then attach to the container from another terminal:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ docker attach $(docker compose ps invokeai -q)
|
||||||
|
|
||||||
|
invoke>
|
||||||
|
```
|
||||||
|
|
||||||
|
Enjoy using the `invoke>` prompt. To detach from the container, type `Ctrl+P` followed by `Ctrl+Q` (this is the escape sequence).
|
46
docker/docker-compose.yml
Normal file
46
docker/docker-compose.yml
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr
|
||||||
|
|
||||||
|
version: '3.8'
|
||||||
|
|
||||||
|
services:
|
||||||
|
invokeai:
|
||||||
|
image: "local/invokeai:latest"
|
||||||
|
# edit below to run on a container runtime other than nvidia-container-runtime.
|
||||||
|
# not yet tested with rocm/AMD GPUs
|
||||||
|
# Comment out the "deploy" section to run on CPU only
|
||||||
|
deploy:
|
||||||
|
resources:
|
||||||
|
reservations:
|
||||||
|
devices:
|
||||||
|
- driver: nvidia
|
||||||
|
count: 1
|
||||||
|
capabilities: [gpu]
|
||||||
|
build:
|
||||||
|
context: ..
|
||||||
|
dockerfile: docker/Dockerfile
|
||||||
|
|
||||||
|
# variables without a default will automatically inherit from the host environment
|
||||||
|
environment:
|
||||||
|
- INVOKEAI_ROOT
|
||||||
|
- HF_HOME
|
||||||
|
|
||||||
|
# Create a .env file in the same directory as this docker-compose.yml file
|
||||||
|
# and populate it with environment variables. See .env.sample
|
||||||
|
env_file:
|
||||||
|
- .env
|
||||||
|
|
||||||
|
ports:
|
||||||
|
- "${INVOKEAI_PORT:-9090}:9090"
|
||||||
|
volumes:
|
||||||
|
- ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
|
||||||
|
- ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
|
||||||
|
tty: true
|
||||||
|
stdin_open: true
|
||||||
|
|
||||||
|
# # Example of running alternative commands/scripts in the container
|
||||||
|
# command:
|
||||||
|
# - bash
|
||||||
|
# - -c
|
||||||
|
# - |
|
||||||
|
# invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
|
||||||
|
# invokeai-nodes-web --host 0.0.0.0
|
65
docker/docker-entrypoint.sh
Executable file
65
docker/docker-entrypoint.sh
Executable file
@ -0,0 +1,65 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -e -o pipefail
|
||||||
|
|
||||||
|
### Container entrypoint
|
||||||
|
# Runs the CMD as defined by the Dockerfile or passed to `docker run`
|
||||||
|
# Can be used to configure the runtime dir
|
||||||
|
# Bypass by using ENTRYPOINT or `--entrypoint`
|
||||||
|
|
||||||
|
### Set INVOKEAI_ROOT pointing to a valid runtime directory
|
||||||
|
# Otherwise configure the runtime dir first.
|
||||||
|
|
||||||
|
### Configure the InvokeAI runtime directory (done by default):
|
||||||
|
# docker run --rm -it <this image> --configure
|
||||||
|
# or skip with --no-configure
|
||||||
|
|
||||||
|
### Set the CONTAINER_UID envvar to match your user.
|
||||||
|
# Ensures files created in the container are owned by you:
|
||||||
|
# docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
|
||||||
|
# Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.
|
||||||
|
|
||||||
|
USER_ID=${CONTAINER_UID:-1000}
|
||||||
|
USER=invoke
|
||||||
|
usermod -u ${USER_ID} ${USER} 1>/dev/null
|
||||||
|
|
||||||
|
configure() {
|
||||||
|
# Configure the runtime directory
|
||||||
|
if [[ -f ${INVOKEAI_ROOT}/invokeai.yaml ]]; then
|
||||||
|
echo "${INVOKEAI_ROOT}/invokeai.yaml found."
|
||||||
|
echo "To reconfigure InvokeAI, please delete it."
|
||||||
|
echo "==========================================="
|
||||||
|
else
|
||||||
|
mkdir -p ${INVOKEAI_ROOT}
|
||||||
|
chown --recursive ${USER} ${INVOKEAI_ROOT}
|
||||||
|
gosu ${USER} invokeai-configure --yes
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
## Skip attempting to configure.
|
||||||
|
## Must be passed first, before any other args.
|
||||||
|
if [[ $1 != "--no-configure" ]]; then
|
||||||
|
configure
|
||||||
|
else
|
||||||
|
shift
|
||||||
|
fi
|
||||||
|
|
||||||
|
### Set the $PUBLIC_KEY env var to enable SSH access.
|
||||||
|
# We do not install openssh-server in the image by default to avoid bloat.
|
||||||
|
# but it is useful to have the full SSH server e.g. on Runpod.
|
||||||
|
# (use SCP to copy files to/from the image, etc)
|
||||||
|
if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
|
||||||
|
apt-get update
|
||||||
|
apt-get install -y openssh-server
|
||||||
|
pushd $HOME
|
||||||
|
mkdir -p .ssh
|
||||||
|
echo ${PUBLIC_KEY} > .ssh/authorized_keys
|
||||||
|
chmod -R 700 .ssh
|
||||||
|
popd
|
||||||
|
service ssh start
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
cd ${INVOKEAI_ROOT}
|
||||||
|
|
||||||
|
# Run the CMD as the Container User (not root).
|
||||||
|
exec gosu ${USER} "$@"
|
Loading…
Reference in New Issue
Block a user