mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
commit 2f70daef8f
@@ -1,18 +1,21 @@
 # use this file as a whitelist
 *
-!backend
 !invokeai
 !ldm
 !pyproject.toml
 !README.md
-!scripts

 # Guard against pulling in any models that might exist in the directory tree
 **/*.pt*
 **/*.ckpt

-# whitelist frontend, but ignore node_modules
-invokeai/frontend/node_modules
+# ignore frontend but whitelist dist
+invokeai/frontend/**
+!invokeai/frontend/dist
+
+# ignore invokeai/assets but whitelist invokeai/assets/web
+invokeai/assets
+!invokeai/assets/web

 # ignore python cache
 **/__pycache__
.github/workflows/build-container.yml
@@ -47,16 +47,18 @@ jobs:
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
-           type=sha
+           type=raw,value='sha'-{{sha}}-${{ matrix.flavor}}
+           type=raw,value={{branch}}-${{ matrix.flavor }}
          flavor: |
            latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
-           suffix=${{ matrix.flavor }},onlatest=false

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
+       with:
+         platforms: ${{ matrix.platforms }}

      - name: Login to GitHub Container Registry
        if: github.event_name != 'pull_request'
@@ -67,7 +69,7 @@ jobs:
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build container
-       uses: docker/build-push-action@v3
+       uses: docker/build-push-action@v4
        with:
          context: .
          file: ${{ matrix.dockerfile }}
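With these tag rules (assuming standard `docker/metadata-action` templating), a push to `main` for the `cuda` flavor would now produce tags along the lines of `main-cuda` and `sha-<short-sha>-cuda`; the flavor is baked into each `type=raw` value rather than applied globally via the removed `suffix` line.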
@@ -4,21 +4,23 @@

 ARG PYTHON_VERSION=3.9
 ##################
-### base image ###
+## base image ##
 ##################
 FROM python:${PYTHON_VERSION}-slim AS python-base

+# prepare for buildkit cache
+RUN rm -f /etc/apt/apt.conf.d/docker-clean
+
 # Install necesarry packages
 RUN \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
-  --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update \
-  && apt-get install -y \
+  && apt-get install \
+    -yqq \
     --no-install-recommends \
     libgl1-mesa-glx=20.3.* \
     libglib2.0-0=2.66.* \
     libopencv-dev=4.5.* \
-  && apt-get clean \
   && rm -rf /var/lib/apt/lists/*

 # set working directory and path
@@ -27,85 +29,58 @@ ARG APPNAME=InvokeAI
 WORKDIR ${APPDIR}
 ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH

-######################
-### build frontend ###
-######################
-FROM node:lts as frontend-builder
-
-# Copy Sources
-ARG APPDIR=/usr/src
-WORKDIR ${APPDIR}
-COPY --link . .
-
-# install dependencies and build frontend
-WORKDIR ${APPDIR}/invokeai/frontend
-RUN \
-  --mount=type=cache,target=/usr/local/share/.cache/yarn/v6 \
-  yarn install \
-    --prefer-offline \
-    --frozen-lockfile \
-    --non-interactive \
-    --production=false \
-  && yarn build
-
-###################################
-### install python dependencies ###
-###################################
+#######################
+## build pyproject ##
+#######################
 FROM python-base AS pyproject-builder
+ENV PIP_USE_PEP517=1
+
+# prepare for buildkit cache
+ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
+ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
+RUN mkdir -p ${PIP_CACHE_DIR}

 # Install dependencies
 RUN \
+  --mount=type=cache,target=${PIP_CACHE_DIR} \
   --mount=type=cache,target=/var/cache/apt,sharing=locked \
-  --mount=type=cache,target=/var/lib/apt,sharing=locked \
   apt-get update \
-  && apt-get install -y \
+  && apt-get install \
+    -yqq \
     --no-install-recommends \
+    build-essential=12.9 \
     gcc=4:10.2.* \
     python3-dev=3.9.* \
-  && apt-get clean \
   && rm -rf /var/lib/apt/lists/*

 # create virtual environment
-RUN python3 -m venv "${APPNAME}" \
+RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
+  python3 -m venv "${APPNAME}" \
     --upgrade-deps

 # copy sources
-COPY --from=frontend-builder ${APPDIR} .
+COPY --link . .

 # install pyproject.toml
 ARG PIP_EXTRA_INDEX_URL
 ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
-RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
-  "${APPDIR}/${APPNAME}/bin/pip" install \
-    --use-pep517 \
-    .
+ARG PIP_PACKAGE=.
+RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
+  "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
+
+# build patchmatch
+RUN python3 -c "from patchmatch import patch_match"

 #####################
-### runtime image ###
+## runtime image ##
 #####################
 FROM python-base AS runtime

 # setup environment
-COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
+COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
 ENV INVOKEAI_ROOT=/data
 ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"

-# build patchmatch
-RUN \
-  --mount=type=cache,target=/var/cache/apt,sharing=locked \
-  --mount=type=cache,target=/var/lib/apt,sharing=locked \
-  apt-get update \
-  && apt-get install -y \
-    --no-install-recommends \
-    build-essential=12.9 \
-  && PYTHONDONTWRITEBYTECODE=1 \
-    python3 -c "from patchmatch import patch_match" \
-  && apt-get remove -y \
-    --autoremove \
-    build-essential \
-  && apt-get autoclean \
-  && rm -rf /var/lib/apt/lists/*
-
 # set Entrypoint and default CMD
 ENTRYPOINT [ "invokeai" ]
 CMD [ "--web", "--host=0.0.0.0" ]
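Given the `ENTRYPOINT`/`CMD` above, `ENV INVOKEAI_ROOT=/data`, and the web UI port used elsewhere in these docs, a container built from this Dockerfile could be started roughly like this (a sketch; the image name and volume name are placeholders, not defined by this diff):

```bash
# start the web UI; the "invokeai" entrypoint gets "--web --host=0.0.0.0" by default
docker volume create invokeai_data
docker run --rm -it \
  --volume invokeai_data:/data \
  --publish 9090:9090 \
  ghcr.io/your-user/invokeai:main-cuda
```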
@@ -17,14 +17,14 @@ DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}

 # print the settings
 echo -e "You are using these values:\n"
-echo -e "Dockerfile: \t${DOCKERFILE}"
-echo -e "index-url: \t${PIP_EXTRA_INDEX_URL:-none}"
-echo -e "Volumename: \t${VOLUMENAME}"
-echo -e "Platform: \t${PLATFORM}"
-echo -e "Registry: \t${CONTAINER_REGISTRY}"
-echo -e "Repository: \t${CONTAINER_REPOSITORY}"
-echo -e "Container Tag: \t${CONTAINER_TAG}"
-echo -e "Container Image: ${CONTAINER_IMAGE}\n"
+echo -e "Dockerfile:\t\t${DOCKERFILE}"
+echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
+echo -e "Volumename:\t\t${VOLUMENAME}"
+echo -e "Platform:\t\t${PLATFORM}"
+echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
+echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
+echo -e "Container Tag:\t\t${CONTAINER_TAG}"
+echo -e "Container Image:\t${CONTAINER_IMAGE}\n"

 # Create docker volume
 if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
@@ -35,9 +35,10 @@ else
 fi

 # Build Container
-docker build \
+DOCKER_BUILDKIT=1 docker build \
   --platform="${PLATFORM}" \
   --tag="${CONTAINER_IMAGE}" \
   ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
+  ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
   --file="${DOCKERFILE}" \
   ..
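Since the script now forwards an optional `PIP_PACKAGE` build-arg, a different pip target can presumably be selected from the calling environment, e.g. the `.[xformers]` extra that appears (commented out) in the flavor logic below:

```bash
# a sketch: build with the xformers extra instead of the default "." package
PIP_PACKAGE=".[xformers]" ./docker/build.sh
```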
@@ -2,12 +2,12 @@

 if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
   # Decide which container flavor to build if not specified
-  if [[ -z "$CONTAINER_FLAVOR" ]]; then
+  if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
     # Check for CUDA and ROCm
     CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
     ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
     if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
-      CONTAINER_FLAVOR=cuda
+      CONTAINER_FLAVOR="cuda"
     elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
       CONTAINER_FLAVOR="rocm"
     else
@@ -16,9 +16,11 @@ if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
   fi
   # Set PIP_EXTRA_INDEX_URL based on container flavor
   if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
-    PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/rocm"}"
-  elif CONTAINER_FLAVOR=cpu; then
-    PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/cpu"}"
+    PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
+  elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
+    PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
+  # elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
+  #   PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
   fi
 fi
@@ -30,6 +32,7 @@ PLATFORM="${PLATFORM-Linux/${ARCH}}"
 INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
 CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
 CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
+CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
 CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
 CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
 CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
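The flavor auto-detection above can be checked by hand; these are the same probes the script runs (they assume a local `torch` install):

```bash
# prints "True" when torch sees a CUDA device -> "cuda" flavor
python -c "import torch;print(torch.cuda.is_available())"
# prints "True" for a ROCm build of torch -> "rocm" flavor
python -c "import torch;print(torch.version.hip is not None)"
```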
@@ -16,10 +16,6 @@ title: Installing with Docker

 For general use, install locally to leverage your machine's GPU.

-!!! tip "For running on a cloud instance/service"
-
-    Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below
-
 ## Why containers?

 They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
|
|||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
| Environment-Variable | Default value | Description |
|
| Environment-Variable <img width="220" align="right"/> | Default value <img width="360" align="right"/> | Description |
|
||||||
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
|
| ----------------------------------------------------- | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
|
| `HUGGING_FACE_HUB_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
|
||||||
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will used as the container repository/image name |
|
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will used as the container repository/image name |
|
||||||
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
|
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
|
||||||
| `ARCH` | arch of the build machine | can be changed if you want to build the image for another arch |
|
| `ARCH` | arch of the build machine | Can be changed if you want to build the image for another arch |
|
||||||
| `INVOKEAI_TAG` | latest | the Container Repository / Tag which will be used |
|
| `CONTAINER_REGISTRY` | ghcr.io | Name of the Container Registry to use for the full tag |
|
||||||
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
|
| `CONTAINER_REPOSITORY` | `$(whoami)/${REPOSITORY_NAME}` | Name of the Container Repository |
|
||||||
| `CONTAINER_FLAVOR` | cuda | the flavor of the image, which can be changed if you build f.e. with amd requirements file. |
|
| `CONTAINER_FLAVOR` | `cuda` | The flavor of the image to built, available options are `cuda`, `rocm` and `cpu`. If you choose `rocm` or `cpu`, the extra-index-url will be selected automatically, unless you set one yourself. |
|
||||||
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
|
| `CONTAINER_TAG` | `${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}` | The Container Repository / Tag which will be used |
|
||||||
|
| `INVOKE_DOCKERFILE` | `Dockerfile` | The Dockerfile which should be built, handy for development |
|
||||||
|
| `PIP_EXTRA_INDEX_URL` | | If you want to use a custom pip-extra-index-url |
|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
#### Build the Image
|
#### Build the Image
|
||||||
|
|
||||||
I provided a build script, which is located in `docker-build/build.sh` but still
|
I provided a build script, which is located next to the Dockerfile in
|
||||||
needs to be executed from the Repository root.
|
`docker/build.sh`. It can be executed from repository root like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./docker-build/build.sh
|
./docker/build.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
The build Script not only builds the container, but also creates the docker
|
The build Script not only builds the container, but also creates the docker
|
||||||
volume if not existing yet, or if empty it will just download the models.
|
volume if not existing yet.
|
||||||
|
|
||||||
#### Run the Container
|
#### Run the Container
|
||||||
|
|
||||||
After the build process is done, you can run the container via the provided
|
After the build process is done, you can run the container via the provided
|
||||||
`docker-build/run.sh` script
|
`docker/run.sh` script
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./docker-build/run.sh
|
./docker/run.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
When used without arguments, the container will start the webserver and provide
|
When used without arguments, the container will start the webserver and provide
|
||||||
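Putting the table above to use, a build that overrides a couple of these variables might look like this (a sketch; any variable not set keeps its default):

```bash
# e.g. build the ROCm flavor into a custom-named data volume
CONTAINER_FLAVOR="rocm" VOLUMENAME="invokeai_data" ./docker/build.sh
```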
@@ -119,7 +117,7 @@ also do so.
 !!! example "run script example"

     ```bash
-    ./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
+    ./docker/run.sh "banana sushi" -Ak_lms -S42 -s10
     ```

 This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
@@ -130,16 +128,18 @@ also do so.

 ## Running the container on your GPU

-If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
-environment variable to enable GPU usage and have the process run much faster:
+If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running
+the container with an extra environment variable to enable GPU usage and have
+the process run much faster:

 ```bash
-GPU_FLAGS=all ./docker-build/run.sh
+GPU_FLAGS=all ./docker/run.sh
 ```

 This passes the `--gpus all` to docker and uses the GPU.

-If you don't have a GPU (or your host is not yet setup to use it) you will see a message like this:
+If you don't have a GPU (or your host is not yet setup to use it) you will see a
+message like this:

 `docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`

@@ -147,84 +147,8 @@ You can use the full set of GPU combinations documented here:

 https://docs.docker.com/config/containers/resource_constraints/#gpu

-For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to choose a specific device identified by a UUID.
-
-## Running InvokeAI in the cloud with Docker
-
-We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).
-
-An advantage of this method is that it does not need any local setup or additional dependencies.
-
-See the `docker-build/Dockerfile.cloud` file to familizarize yourself with the image's content.
-
-### Prerequisites
-
-- a `docker` runtime
-- `make` (optional but helps for convenience)
-- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation
-
-Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
-
-### Building and running the image locally
-
-1. Clone this repo and `cd docker-build`
-1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
-1. _(skip this step if you already have a complete InvokeAI runtime directory)_
-    - `make configure` (This does *not* require a GPU-capable system)
-    - this will create a local cache of models and configs (a.k.a the _runtime dir_)
-    - enter your Huggingface token when prompted
-1. `make web`
-1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!
-
-To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitraty advanced use, `make shell`.
-
-#### Building and running without `make`
-
-(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).
-
-!!! example "Build the image and configure the runtime directory"
-    ```Shell
-    cd docker-build
-
-    DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
-
-    docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
-    ```
-
-!!! example "Run the web server"
-    ```Shell
-    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
-    ```
-
-    Access the Web UI at http://localhost:9090
-
-!!! example "Run the InvokeAI interactive CLI"
-    ```
-    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
-    ```
-
-### Running the image in the cloud
-
-This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
-
-1. build this image either in the cloud (you'll need to pull the repo), or locally
-1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub)
-1. `docker pull` it on your cloud instance
-1. configure the runtime directory as per above example, using `docker run ... configure_invokeai.py` script
-1. use either one of the `docker run` commands above, substituting the image name for your own image.
-
-To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.
-
-The template's `README` provides ample detail, but at a high level, the process is as follows:
-
-1. create a pod using this Docker image
-1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
-1. Run the pod with `sleep infinity` as the Docker command
-1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py` script
-1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
-1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
-
-Running on other cloud providers such as Vast.ai will likely work in a similar fashion.
-
+For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to
+choose a specific device identified by a UUID.

 ---

@@ -240,13 +164,12 @@ Running on other cloud providers such as Vast.ai will likely work in a similar f
 If you're on a **Linux container** the `invoke` script is **automatically
 started** and the output dir set to the Docker volume you created earlier.

-If you're **directly on macOS follow these startup instructions**.
-With the Conda environment activated (`conda activate ldm`), run the interactive
+If you're **directly on macOS follow these startup instructions**. With the
+Conda environment activated (`conda activate ldm`), run the interactive
 interface that combines the functionality of the original scripts `txt2img` and
-`img2img`:
-Use the more accurate but VRAM-intensive full precision math because
-half-precision requires autocast and won't work.
-By default the images are saved in `outputs/img-samples/`.
+`img2img`: Use the more accurate but VRAM-intensive full precision math because
+half-precision requires autocast and won't work. By default the images are saved
+in `outputs/img-samples/`.

 ```Shell
 python3 scripts/invoke.py --full_precision
@@ -262,9 +185,9 @@ invoke> q
 ### Text to Image

 For quick (but bad) image results test with 5 steps (default 50) and 1 sample
-image. This will let you know that everything is set up correctly.
-Then increase steps to 100 or more for good (but slower) results.
-The prompt can be in quotes or not.
+image. This will let you know that everything is set up correctly. Then increase
+steps to 100 or more for good (but slower) results. The prompt can be in quotes
+or not.

 ```Shell
 invoke> The hulk fighting with sheldon cooper -s5 -n1
@@ -277,10 +200,9 @@ You'll need to experiment to see if face restoration is making it better or
 worse for your specific prompt.

 If you're on a container the output is set to the Docker volume. You can copy it
-wherever you want.
-You can download it from the Docker Desktop app, Volumes, my-vol, data.
-Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
-`*.png` so you'll need to specify the image file name.
+wherever you want. You can download it from the Docker Desktop app, Volumes,
+my-vol, data. Or you can copy it from your Mac terminal. Keep in mind
+`docker cp` can't expand `*.png` so you'll need to specify the image file name.

 On your host Mac (you can use the name of any container that mounted the
 volume):
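For illustration only (container name and paths are placeholders), copying a single image out with `docker cp` would look something like:

```bash
# docker cp does not expand globs, so name the exact file
docker cp my-container:/data/outputs/img-samples/000001.png ~/Desktop/
```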