From a328986b43cf7c53bf2714be8e7111e0318615d5 Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Sat, 8 Jul 2023 04:09:10 +0300
Subject: [PATCH 01/17] Less naive model detection

---
 .../backend/model_management/model_manager.py | 10 +++----
 .../model_management/models/__init__.py       |  2 +-
 .../backend/model_management/models/base.py   |  3 ++
 .../backend/model_management/models/lora.py   | 15 ++++++++--
 .../models/stable_diffusion.py                | 29 +++++++++++++++----
 .../models/textual_inversion.py               | 14 ++++++++-
 .../backend/model_management/models/vae.py    | 15 ++++++++--
 7 files changed, 68 insertions(+), 20 deletions(-)

diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index 03514cfeff..a8d43a6888 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -250,8 +250,8 @@ from .model_cache import ModelCache, ModelLocker
 from .models import (
     BaseModelType, ModelType, SubModelType,
     ModelError, SchedulerPredictionType, MODEL_CLASSES,
-    ModelConfigBase, ModelNotFoundException,
-    )
+    ModelConfigBase, ModelNotFoundException, InvalidModelException,
+)

 # We are only starting to number the config file with release 3.
 # The config file version doesn't have to start at release version, but it will help
@@ -275,10 +275,6 @@ class ModelInfo():
     def __exit__(self,*args, **kwargs):
         self.context.__exit__(*args, **kwargs)

-class InvalidModelError(Exception):
-    "Raised when an invalid model is requested"
-    pass
-
 class AddModelResult(BaseModel):
     name: str = Field(description="The name of the model after installation")
     model_type: ModelType = Field(description="The type of model")
@@ -817,6 +813,8 @@ class ModelManager(object):
                     model_config: ModelConfigBase = model_class.probe_config(str(model_path))
                     self.models[model_key] = model_config
                     new_models_found = True
+                except InvalidModelException:
+                    self.logger.warning(f"Not a valid model: {model_path}")
                 except NotImplementedError as e:
                     self.logger.warning(e)

diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py
index 1b381cd2a8..b02d85471d 100644
--- a/invokeai/backend/model_management/models/__init__.py
+++ b/invokeai/backend/model_management/models/__init__.py
@@ -2,7 +2,7 @@ import inspect
 from enum import Enum
 from pydantic import BaseModel
 from typing import Literal, get_origin
-from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException
+from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException, InvalidModelException
 from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
 from .vae import VaeModel
 from .lora import LoRAModel

diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py
index 57c02bce76..ddbc401e5b 100644
--- a/invokeai/backend/model_management/models/base.py
+++ b/invokeai/backend/model_management/models/base.py
@@ -15,6 +15,9 @@ from contextlib import suppress
 from pydantic import BaseModel, Field
 from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union

+class InvalidModelException(Exception):
+    pass
+
 class ModelNotFoundException(Exception):
     pass
diff --git a/invokeai/backend/model_management/models/lora.py b/invokeai/backend/model_management/models/lora.py
index 59feacde06..5387ade0e5 100644
--- a/invokeai/backend/model_management/models/lora.py
+++ b/invokeai/backend/model_management/models/lora.py
@@ -9,6 +9,7 @@ from .base import (
     ModelType,
     SubModelType,
     classproperty,
+    InvalidModelException,
 )
 # TODO: naming
 from ..lora import LoRAModel as LoRAModelRaw
@@ -56,10 +57,18 @@ class LoRAModel(ModelBase):

     @classmethod
     def detect_format(cls, path: str):
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
         if os.path.isdir(path):
-            return LoRAModelFormat.Diffusers
-        else:
-            return LoRAModelFormat.LyCORIS
+            if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
+                return LoRAModelFormat.Diffusers
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return LoRAModelFormat.LyCORIS
+
+        raise InvalidModelException(f"Not a valid model: {path}")

     @classmethod
     def convert_if_required(

diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py
index c98d5a0ae8..74751a40dd 100644
--- a/invokeai/backend/model_management/models/stable_diffusion.py
+++ b/invokeai/backend/model_management/models/stable_diffusion.py
@@ -16,6 +16,7 @@ from .base import (
     SilenceWarnings,
     read_checkpoint_meta,
     classproperty,
+    InvalidModelException,
 )
 from invokeai.app.services.config import InvokeAIAppConfig
 from omegaconf import OmegaConf
@@ -98,10 +99,18 @@ class StableDiffusion1Model(DiffusersModel):

     @classmethod
     def detect_format(cls, model_path: str):
+        if not os.path.exists(model_path):
+            raise ModelNotFoundException()
+
         if os.path.isdir(model_path):
-            return StableDiffusion1ModelFormat.Diffusers
-        else:
-            return StableDiffusion1ModelFormat.Checkpoint
+            if os.path.exists(os.path.join(model_path, "model_index.json")):
+                return StableDiffusion1ModelFormat.Diffusers
+
+        if os.path.isfile(model_path):
+            if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return StableDiffusion1ModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {model_path}")

     @classmethod
     def convert_if_required(
@@ -200,10 +209,18 @@ class StableDiffusion2Model(DiffusersModel):

     @classmethod
     def detect_format(cls, model_path: str):
+        if not os.path.exists(model_path):
+            raise ModelNotFoundException()
+
         if os.path.isdir(model_path):
-            return StableDiffusion2ModelFormat.Diffusers
-        else:
-            return StableDiffusion2ModelFormat.Checkpoint
+            if os.path.exists(os.path.join(model_path, "model_index.json")):
+                return StableDiffusion2ModelFormat.Diffusers
+
+        if os.path.isfile(model_path):
+            if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return StableDiffusion2ModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {model_path}")

     @classmethod
     def convert_if_required(

diff --git a/invokeai/backend/model_management/models/textual_inversion.py b/invokeai/backend/model_management/models/textual_inversion.py
index 4dcdbb24ba..9cd62bb417 100644
--- a/invokeai/backend/model_management/models/textual_inversion.py
+++ b/invokeai/backend/model_management/models/textual_inversion.py
@@ -9,6 +9,7 @@ from .base import (
     SubModelType,
     classproperty,
     ModelNotFoundException,
+    InvalidModelException,
 )
 # TODO: naming
 from ..lora import TextualInversionModel as TextualInversionModelRaw
@@ -59,7 +60,18 @@ class TextualInversionModel(ModelBase):

     @classmethod
     def detect_format(cls, path: str):
-        return None
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
+        if os.path.isdir(path):
+            if os.path.exists(os.path.join(path, "learned_embeds.bin")):
+                return None # diffusers-ti
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return None
+
+        raise InvalidModelException(f"Not a valid model: {path}")

     @classmethod
     def convert_if_required(

diff --git a/invokeai/backend/model_management/models/vae.py b/invokeai/backend/model_management/models/vae.py
index 3f0d226687..2a5b7cff24 100644
--- a/invokeai/backend/model_management/models/vae.py
+++ b/invokeai/backend/model_management/models/vae.py
@@ -15,6 +15,7 @@ from .base import (
     calc_model_size_by_fs,
     calc_model_size_by_data,
     classproperty,
+    InvalidModelException,
 )
 from invokeai.app.services.config import InvokeAIAppConfig
 from diffusers.utils import is_safetensors_available
@@ -75,10 +76,18 @@ class VaeModel(ModelBase):

     @classmethod
     def detect_format(cls, path: str):
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
         if os.path.isdir(path):
-            return VaeModelFormat.Diffusers
-        else:
-            return VaeModelFormat.Checkpoint
+            if os.path.exists(os.path.join(path, "config.json")):
+                return VaeModelFormat.Diffusers
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
+                return VaeModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {path}")

     @classmethod
     def convert_if_required(

From 67c8cf4bc29342c57ae979ebd69ccc9a6fcb262b Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Sat, 8 Jul 2023 14:26:25 +0300
Subject: [PATCH 02/17] Controlnet model detection

---
 .../backend/model_management/models/controlnet.py | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/invokeai/backend/model_management/models/controlnet.py b/invokeai/backend/model_management/models/controlnet.py
index 9563f87afd..3b73097799 100644
--- a/invokeai/backend/model_management/models/controlnet.py
+++ b/invokeai/backend/model_management/models/controlnet.py
@@ -13,6 +13,7 @@ from .base import (
     calc_model_size_by_fs,
     calc_model_size_by_data,
     classproperty,
+    InvalidModelException,
 )

 class ControlNetModelFormat(str, Enum):
@@ -73,10 +74,18 @@ class ControlNetModel(ModelBase):

     @classmethod
     def detect_format(cls, path: str):
+        if not os.path.exists(path):
+            raise ModelNotFoundException()
+
         if os.path.isdir(path):
-            return ControlNetModelFormat.Diffusers
-        else:
-            return ControlNetModelFormat.Checkpoint
+            if os.path.exists(os.path.join(path, "config.json")):
+                return ControlNetModelFormat.Diffusers
+
+        if os.path.isfile(path):
+            if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
+                return ControlNetModelFormat.Checkpoint
+
+        raise InvalidModelException(f"Not a valid model: {path}")

     @classmethod
     def convert_if_required(

From 4a8172bcd0edfad7406fa06e8fdb2a53b4ee1fea Mon Sep 17 00:00:00 2001
From: Mary Hipp Rogers
Date: Wed, 12 Jul 2023 13:03:39 -0400
Subject: [PATCH 03/17] disable features that are not supported yet or no
 longer supported (#3739)

Co-authored-by: Mary Hipp
---
 invokeai/frontend/web/src/app/types/invokeai.ts          |  2 ++
 .../Parameters/Noise/ParamNoiseCollapse.tsx              |  7 +++++--
 .../web/src/features/system/store/configSlice.ts         | 11 +++++++++--
 3 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/invokeai/frontend/web/src/app/types/invokeai.ts b/invokeai/frontend/web/src/app/types/invokeai.ts
index 0fd0120ce8..229761dabb 100644
--- a/invokeai/frontend/web/src/app/types/invokeai.ts
+++ b/invokeai/frontend/web/src/app/types/invokeai.ts
@@ -102,6 +102,8 @@ export type AppFeature =
 export type SDFeature =
   | 'controlNet'
   | 'noise'
+  | 'perlinNoise'
+  | 'noiseThreshold'
   | 'variation'
   | 'symmetry'
   | 'seamless'

diff --git a/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseCollapse.tsx b/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseCollapse.tsx
index 053c1cfec0..0419ecc656 100644
--- a/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseCollapse.tsx
+++ b/invokeai/frontend/web/src/features/parameters/components/Parameters/Noise/ParamNoiseCollapse.tsx
@@ -27,6 +27,9 @@ const ParamNoiseCollapse = () => {
   const { t } = useTranslation();

   const isNoiseEnabled = useFeatureStatus('noise').isFeatureEnabled;
+  const isPerlinNoiseEnabled = useFeatureStatus('perlinNoise').isFeatureEnabled;
+  const isNoiseThresholdEnabled =
+    useFeatureStatus('noiseThreshold').isFeatureEnabled;

   const { activeLabel } = useAppSelector(selector);
@@ -42,8 +45,8 @@ const ParamNoiseCollapse = () => {
-        <ParamPerlinNoise />
-        <ParamNoiseThreshold />
+        {isPerlinNoiseEnabled && <ParamPerlinNoise />}
+        {isNoiseThresholdEnabled && <ParamNoiseThreshold />}
   );

diff --git a/invokeai/frontend/web/src/features/system/store/configSlice.ts b/invokeai/frontend/web/src/features/system/store/configSlice.ts
index cf257032ff..c69d596b78 100644
--- a/invokeai/frontend/web/src/features/system/store/configSlice.ts
+++ b/invokeai/frontend/web/src/features/system/store/configSlice.ts
@@ -6,8 +6,15 @@ import { merge } from 'lodash-es';
 export const initialConfigState: AppConfig = {
   shouldUpdateImagesOnConnect: false,
   disabledTabs: [],
-  disabledFeatures: [],
-  disabledSDFeatures: [],
+  disabledFeatures: ['lightbox', 'faceRestore'],
+  disabledSDFeatures: [
+    'variation',
+    'seamless',
+    'symmetry',
+    'hires',
+    'perlinNoise',
+    'noiseThreshold',
+  ],
   canRestoreDeletedImagesFromBin: true,
   sd: {
     disabledControlNetModels: [],

From f3b45d0ad9094c8881406230a514e7472dd27871 Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Sun, 25 Jun 2023 16:03:02 -0400
Subject: [PATCH 04/17] (docker) rewrite container implementation with
 docker-compose support

- rewrite Dockerfile
- add a stage to build the UI
- add docker-compose.yml
- add docker-entrypoint.sh such that any command may be used at runtime
- docker-compose adds .env support
- add a sample .env file
---
 .dockerignore               |  23 +----
 docker/.env.sample          |  13 +++
 docker/Dockerfile           | 199 +++++++++++++++++++-----------------
 docker/README.md            |  99 ++++++++++++++++++
 docker/docker-compose.yml   |  46 +++++++++
 docker/docker-entrypoint.sh |  65 ++++++++++++
 6 files changed, 333 insertions(+), 112 deletions(-)
 create mode 100644 docker/.env.sample
 create mode 100644 docker/README.md
 create mode 100644 docker/docker-compose.yml
 create mode 100755 docker/docker-entrypoint.sh

diff --git a/.dockerignore b/.dockerignore
index cfdc7fc735..dc9b1ffaa5 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,25 +1,8 @@
-# use this file as a whitelist
 *
 !invokeai
 !ldm
 !pyproject.toml
+!docker/docker-entrypoint.sh
+!LICENSE

-# ignore frontend/web but whitelist dist
-invokeai/frontend/web/
-!invokeai/frontend/web/dist/
-
-# ignore invokeai/assets but whitelist invokeai/assets/web
-invokeai/assets/
-!invokeai/assets/web/
-
-# Guard against pulling in any models that might exist in the directory tree
-**/*.pt*
-**/*.ckpt
-
-# Byte-compiled / optimized / DLL files
-**/__pycache__/
-**/*.py[cod]
-
-# Distribution / packaging
-**/*.egg-info/
-**/*.egg
+**/__pycache__

diff --git a/docker/.env.sample b/docker/.env.sample
new file mode 100644
index 0000000000..7e414ecd65
--- /dev/null
+++ b/docker/.env.sample
@@ -0,0 +1,13 @@
+## Make a copy of this file named `.env` and fill in the values below.
+## Any environment variables supported by InvokeAI can be specified here.
+
+# INVOKEAI_ROOT is the path to a location on the local filesystem where InvokeAI will store data.
+# Outputs will also be stored here by default.
+# This **must** be an absolute path.
+INVOKEAI_ROOT=
+
+HUGGINGFACE_TOKEN=
+
+## optional variables specific to the docker setup
+# GPU_DRIVER=cuda
+# CONTAINER_UID=1000
\ No newline at end of file

diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1c2b991028..df2ac4bb0f 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,107 +1,122 @@
-# syntax=docker/dockerfile:1
+# syntax=docker/dockerfile:1.4

-ARG PYTHON_VERSION=3.9
-##################
-## base image ##
-##################
-FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base
+## Builder stage

-LABEL org.opencontainers.image.authors="mauwii@outlook.de"
+FROM ubuntu:22.04 AS builder

-# Prepare apt for buildkit cache
-RUN rm -f /etc/apt/apt.conf.d/docker-clean \
-    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
+ARG DEBIAN_FRONTEND=noninteractive
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt update && apt-get install -y \
+        git \
+        python3-venv \
+        python3-pip \
+        build-essential

-# Install dependencies
-RUN \
-    --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    --mount=type=cache,target=/var/lib/apt,sharing=locked \
-    apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        libgl1-mesa-glx=20.3.* \
-        libglib2.0-0=2.66.* \
-        libopencv-dev=4.5.*
+ENV INVOKEAI_SRC=/opt/invokeai
+ENV VIRTUAL_ENV=/opt/venv/invokeai

-# Set working directory and env
-ARG APPDIR=/usr/src
-ARG APPNAME=InvokeAI
-WORKDIR ${APPDIR}
-ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
-# Keeps Python from generating .pyc files in the container
-ENV PYTHONDONTWRITEBYTECODE 1
-# Turns off buffering for easier container logging
-ENV PYTHONUNBUFFERED 1
-# Don't fall back to legacy build system
-ENV PIP_USE_PEP517=1
+ENV PATH="$VIRTUAL_ENV/bin:$PATH"
+ARG TORCH_VERSION=2.0.1
+ARG TORCHVISION_VERSION=0.15.2
+ARG GPU_DRIVER=cuda
+ARG TARGETPLATFORM
+# unused but available
+ARG BUILDPLATFORM

-#######################
-## build pyproject ##
-#######################
-FROM python-base AS pyproject-builder
+WORKDIR ${INVOKEAI_SRC}

-# Install build dependencies
-RUN \
-    --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    --mount=type=cache,target=/var/lib/apt,sharing=locked \
-    apt-get update \
-    && apt-get install -y \
-        --no-install-recommends \
-        build-essential=12.9 \
-        gcc=4:10.2.* \
-        python3-dev=3.9.*
+# Install pytorch before all other pip packages
+# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
+# x86_64/CUDA is default
+RUN --mount=type=cache,target=/root/.cache/pip \
+    python3 -m venv ${VIRTUAL_ENV} &&\
+    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
+    elif [ "$GPU_DRIVER" = "rocm" ]; then \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.2"; \
+    else \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu117"; \
+    fi &&\
+    pip install $extra_index_url_arg \
+        torch==$TORCH_VERSION \
+        torchvision==$TORCHVISION_VERSION

-# Prepare pip for buildkit cache
-ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
-ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
-RUN mkdir -p ${PIP_CACHE_DIR}
+# Install the local package.
+# Editable mode helps use the same image for development:
+# the local working copy can be bind-mounted into the image
+# at path defined by ${INVOKEAI_SRC}
+COPY invokeai ./invokeai
+COPY pyproject.toml ./
+RUN --mount=type=cache,target=/root/.cache/pip \
+    # xformers + triton fails to install on arm64
+    if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
+        pip install -e ".[xformers]"; \
+    else \
+        pip install -e "."; \
+    fi

-# Create virtual environment
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
-    python3 -m venv "${APPNAME}" \
-        --upgrade-deps
+# #### Build the Web UI ------------------------------------

-# Install requirements
-COPY --link pyproject.toml .
-COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/
-ARG PIP_EXTRA_INDEX_URL
-ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
-    "${APPNAME}"/bin/pip install .
+FROM node:18 as web-builder
+WORKDIR /build
+COPY invokeai/frontend/web/ ./
+RUN --mount=type=cache,target=node_modules \
+    npm install --include dev
+RUN yarn vite build

-# Install pyproject.toml
-COPY --link . .
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
-    "${APPNAME}/bin/pip" install .

-# Build patchmatch
+#### Runtime stage ---------------------------------------
+
+FROM library/ubuntu:22.04 as runtime
+
+ARG DEBIAN_FRONTEND=noninteractive
+ENV PYTHONUNBUFFERED=1
+ENV PYTHONDONTWRITEBYTECODE=1
+
+RUN apt update && apt install -y --no-install-recommends \
+    git \
+    curl \
+    vim \
+    tmux \
+    ncdu \
+    iotop \
+    bzip2 \
+    gosu \
+    libglib2.0-0 \
+    libgl1-mesa-glx \
+    python3-venv \
+    python3-pip \
+    build-essential \
+    libopencv-dev &&\
+    apt-get clean && apt-get autoclean
+
+# globally add magic-wormhole
+# for ease of transferring data to and from the container
+# when running in sandboxed cloud environments; e.g. Runpod etc.
+RUN pip install magic-wormhole
+
+ENV INVOKEAI_SRC=/opt/invokeai
+ENV VIRTUAL_ENV=/opt/venv/invokeai
+ENV INVOKEAI_ROOT=/invokeai
+ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
+
+# --link requires buildkit w/ dockerfile syntax 1.4
+COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
+COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
+COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
+
+WORKDIR ${INVOKEAI_SRC}
+
+# build patchmatch
+RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
 RUN python3 -c "from patchmatch import patch_match"

-#####################
-## runtime image ##
-#####################
-FROM python-base AS runtime
+# Create unprivileged user and make the local dir
+RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke
+RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT}

-# Create a new user
-ARG UNAME=appuser
-RUN useradd \
-    --no-log-init \
-    -m \
-    -U \
-    "${UNAME}"
-
-# Create volume directory
-ARG VOLUME_DIR=/data
-RUN mkdir -p "${VOLUME_DIR}" \
-    && chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"
-
-# Setup runtime environment
-USER ${UNAME}:${UNAME}
-COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
-ENV INVOKEAI_ROOT ${VOLUME_DIR}
-ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
-ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
-EXPOSE 9090
-ENTRYPOINT [ "invokeai" ]
-CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
-VOLUME [ "${VOLUME_DIR}" ]
+COPY docker/docker-entrypoint.sh ./
+ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
+CMD ["invokeai-web", "--host", "0.0.0.0"]

diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000000..5d9de0fa65
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,99 @@
+# InvokeAI Containerized
+
+All commands are to be run from the `docker` directory: `cd docker`
+
+Linux
+
+1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`)
+2. Install `docker-compose`
+3. Ensure docker daemon is able to access the GPU.
+
+macOS
+
+1. Ensure Docker has at least 16GB RAM
+2. Enable VirtioFS for file sharing
+3. Enable `docker-compose` V2 support
+
+This is done via Docker Desktop preferences
+
+## Quickstart
+
+1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary.
+2. `docker-compose up`
+
+The image will be built automatically if needed.
+
+The runtime directory (holding models and outputs) will be created in your home directory, under `~/invokeai`, populated with necessary content (you will be asked a couple of questions during setup)
+
+### Use a GPU
+
+- Linux is *recommended* for GPU support in Docker.
+- WSL2 is *required* for Windows.
+- only `x86_64` architecture is supported.
+
+The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.
+
+## Customize
+
+Check the `.env` file. It contains environment variables for running in Docker. Fill it in with your own values. Next time you run `docker-compose up`, your custom values will be used.
+
+You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.
+
+Example:
+
+```
+LOCAL_ROOT_DIR=/Volumes/HugeDrive/invokeai
+HUGGINGFACE_TOKEN=the_actual_token
+CONTAINER_UID=1000
+GPU_DRIVER=cuda
+```
+
+## Moar Customize!
+
+See the `docker-compose.yaml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below.
+
+
+#### Turn off the NSFW checker
+
+```
+command:
+  - invokeai
+  - --no-nsfw_check
+  - --web
+  - --host 0.0.0.0
+```
+
+
+### Reconfigure the runtime directory
+
+Can be used to download additional models from the supported model list
+
+In conjunction with `LOCAL_ROOT_DIR` can be also used to create bran
+
+```
+command:
+  - invokeai-configure
+  - --yes
+```
+
+
+#### Run in CLI mode
+
+This container starts InvokeAI in web mode by default.
+
+Override the `command` and run `docker compose:
+
+```
+command:
+  - invoke
+```
+
+Then attach to the container from another terminal:
+
+```
+$ docker attach $(docker compose ps invokeai -q)
+
+invoke>
+```
+
+Enjoy using the `invoke>` prompt. To detach from the container, type `Ctrl+P` followed by `Ctrl+Q` (this is the escape sequence).

diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
new file mode 100644
index 0000000000..b3e101430d
--- /dev/null
+++ b/docker/docker-compose.yml
@@ -0,0 +1,46 @@
+# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr
+
+version: '3.8'
+
+services:
+  invokeai:
+    image: "local/invokeai:latest"
+    # edit below to run on a container runtime other than nvidia-container-runtime.
+    # not yet tested with rocm/AMD GPUs
+    # Comment out the "deploy" section to run on CPU only
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+    build:
+      context: ..
+      dockerfile: docker/Dockerfile
+
+    # variables without a default will automatically inherit from the host environment
+    environment:
+      - INVOKEAI_ROOT
+      - HF_HOME
+
+    # Create a .env file in the same directory as this docker-compose.yml file
+    # and populate it with environment variables. See .env.sample
+    env_file:
+      - .env
+
+    ports:
+      - "${INVOKEAI_PORT:-9090}:9090"
+    volumes:
+      - ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
+      - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
+    tty: true
+    stdin_open: true
+
+    # # Example of running alternative commands/scripts in the container
+    # command:
+    #   - bash
+    #   - -c
+    #   - |
+    #     invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
+    #     invokeai-nodes-web --host 0.0.0.0

diff --git a/docker/docker-entrypoint.sh b/docker/docker-entrypoint.sh
new file mode 100755
index 0000000000..cdf3026c87
--- /dev/null
+++ b/docker/docker-entrypoint.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+set -e -o pipefail
+
+### Container entrypoint
+# Runs the CMD as defined by the Dockerfile or passed to `docker run`
+# Can be used to configure the runtime dir
+# Bypass by using ENTRYPOINT or `--entrypoint`
+
+### Set INVOKEAI_ROOT pointing to a valid runtime directory
+# Otherwise configure the runtime dir first.
+
+### Configure the InvokeAI runtime directory (done by default):
+# docker run --rm -it --configure
+# or skip with --no-configure
+
+### Set the CONTAINER_UID envvar to match your user.
+# Ensures files created in the container are owned by you:
+# docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u)
+# Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.
+
+USER_ID=${CONTAINER_UID:-1000}
+USER=invoke
+usermod -u ${USER_ID} ${USER} 1>/dev/null
+
+configure() {
+    # Configure the runtime directory
+    if [[ -f ${INVOKEAI_ROOT}/invokeai.yaml ]]; then
+        echo "${INVOKEAI_ROOT}/invokeai.yaml found."
+        echo "To reconfigure InvokeAI, please delete it."
+        echo "==========================================="
+    else
+        mkdir -p ${INVOKEAI_ROOT}
+        chown --recursive ${USER} ${INVOKEAI_ROOT}
+        gosu ${USER} invokeai-configure --yes
+    fi
+}
+
+## Skip attempting to configure.
+## Must be passed first, before any other args.
+if [[ $1 != "--no-configure" ]]; then
+    configure
+else
+    shift
+fi
+
+### Set the $PUBLIC_KEY env var to enable SSH access.
+# We do not install openssh-server in the image by default to avoid bloat.
+# but it is useful to have the full SSH server e.g. on Runpod.
+# (use SCP to copy files to/from the image, etc)
+if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
+    apt-get update
+    apt-get install -y openssh-server
+    pushd $HOME
+    mkdir -p .ssh
+    echo ${PUBLIC_KEY} > .ssh/authorized_keys
+    chmod -R 700 .ssh
+    popd
+    service ssh start
+fi
+
+
+cd ${INVOKEAI_ROOT}
+
+# Run the CMD as the Container User (not root).
+exec gosu ${USER} "$@"

From 2a5737c146f884f5271737930e044a10cc2050a2 Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Mon, 26 Jun 2023 02:00:24 -0400
Subject: [PATCH 05/17] (docker) add README used by the Runpod template

---
 docker/runpod-readme.md | 60 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)
 create mode 100644 docker/runpod-readme.md

diff --git a/docker/runpod-readme.md b/docker/runpod-readme.md
new file mode 100644
index 0000000000..c464480d46
--- /dev/null
+++ b/docker/runpod-readme.md
@@ -0,0 +1,60 @@
+# InvokeAI - A Stable Diffusion Toolkit
+
+Stable Diffusion distribution by InvokeAI: https://github.com/invoke-ai
+
+The Docker image tracks the `main` branch of the InvokeAI project, which means it includes the latest features, but may contain some bugs.
+
+Your working directory is mounted under the `/workspace` path inside the pod. The models are in `/workspace/invokeai/models`, and outputs are in `/workspace/invokeai/outputs`.
+
+> **Only the /workspace directory will persist between pod restarts!**
+
+> **If you _terminate_ (not just _stop_) the pod, the /workspace will be lost.**
+
+## Quickstart
+
+1. Launch a pod from this template. **It will take about 5-10 minutes to run through the initial setup**. Be patient.
+1. Wait for the application to load.
+   - TIP: you know it's ready when the CPU usage goes idle
+   - You can also check the logs for a line that says "_Point your browser at..._"
+1. Open the Invoke AI web UI: click the `Connect` => `connect over HTTP` button.
+1. Generate some art!
+
+## Other things you can do
+
+At any point you may edit the pod configuration and set an arbitrary Docker command. For example, you could run a command to download some models using `curl`, or fetch some images and place them into your outputs to continue a working session.
+
+If you need to run *multiple commands*, define them in the Docker Command field like this:
+
+`bash -c "cd ${INVOKEAI_ROOT}/outputs; wormhole receive 2-foo-bar; invoke.py --web --host 0.0.0.0"`
+
+### Copying your data in and out of the pod
+
+This image includes a couple of handy tools to help you get the data into the pod (such as your custom models or embeddings), and out of the pod (such as downloading your outputs). Here are your options for getting your data in and out of the pod:
+
+- **SSH server**:
+  1. Make sure to create and set your Public Key in the RunPod settings (follow the official instructions)
+  1. Add an exposed port 22 (TCP) in the pod settings!
+  1. When your pod restarts, you will see a new entry in the `Connect` dialog. Use this SSH server to `scp` or `sftp` your files as necessary, or SSH into the pod using the fully fledged SSH server.
+
+- [**Magic Wormhole**](https://magic-wormhole.readthedocs.io/en/latest/welcome.html):
+  1. On your computer, `pip install magic-wormhole` (see above instructions for details)
+  1. Connect to the command line **using the "light" SSH client** or the browser-based console. _Currently there's a bug where `wormhole` isn't available when connected to "full" SSH server, as described above_.
+  1. `wormhole send /workspace/invokeai/outputs` will send the entire `outputs` directory. You can also send individual files.
+  1. Once packaged, you will see a `wormhole receive` command. Copy it
+  1. Paste this command into the terminal on your local machine to securely download the payload.
+  1. It works the same in reverse: you can `wormhole send` some models from your computer to the pod. Again, save your files somewhere in `/workspace` or they will be lost when the pod is stopped.
+
+- **RunPod's Cloud Sync feature** may be used to sync the persistent volume to cloud storage. You could, for example, copy the entire `/workspace` to S3, add some custom models to it, and copy it back from S3 when launching new pod configurations. Follow the Cloud Sync instructions.
+
+
+### Disable the NSFW checker
+
+The NSFW checker is enabled by default. To disable it, edit the pod configuration and set the following command:
+
+```
+invoke --web --host 0.0.0.0 --no-nsfw_checker
+```
+
+---
+
+Template ©2023 Eugene Brodsky [ebr](https://github.com/ebr)
\ No newline at end of file

From e9bc8254dd5d7ccd1c47634bfe30712e1edcdd6f Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Mon, 26 Jun 2023 02:12:11 -0400
Subject: [PATCH 06/17] (docker) add a README for the docker setup

---
 docker/README.md | 66 ++++++++++++++++--------------------------------
 1 file changed, 22 insertions(+), 44 deletions(-)

diff --git a/docker/README.md b/docker/README.md
index 5d9de0fa65..91f7fb8c51 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -2,28 +2,33 @@

 All commands are to be run from the `docker` directory: `cd docker`

-Linux
+#### Linux

 1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`)
-2. Install `docker-compose`
+2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-compose-on-ubuntu-22-04).
+   - The deprecated `docker-compose` (hyphenated) CLI continues to work for now.
 3. Ensure docker daemon is able to access the GPU.
+   - You may need to install [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)

-macOS
+#### macOS

 1. Ensure Docker has at least 16GB RAM
 2. Enable VirtioFS for file sharing
-3. Enable `docker-compose` V2 support
+3. Enable `docker compose` V2 support

 This is done via Docker Desktop preferences

 ## Quickstart
+
+1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
+   a. the desired location of the InvokeAI runtime directory, or
+   b. an existing, v3.0.0 compatible runtime directory.
+1. `docker compose up`

 The image will be built automatically if needed.

-The runtime directory (holding models and outputs) will be created in your home directory, under `~/invokeai`, populated with necessary content (you will be asked a couple of questions during setup)
+The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. The runtime directory will be populated with the base configs and models necessary to start generating.

 ### Use a GPU

 - Linux is *recommended* for GPU support in Docker.
 - WSL2 is *required* for Windows.
 - only `x86_64` architecture is supported.

 The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.

 ## Customize

-Check the `.env` file. It contains environment variables for running in Docker. Fill it in with your own values. Next time you run `docker-compose up`, your custom values will be used.
+Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `docker compose up`, your custom values will be used.

 You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.

+Example (most values are optional):

 ```
+INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
 HUGGINGFACE_TOKEN=the_actual_token
 CONTAINER_UID=1000
 GPU_DRIVER=cuda
 ```

+## Even Moar Customizing!

 See the `docker-compose.yaml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below.

 ### Reconfigure the runtime directory

 Can be used to download additional models from the supported model list

+In conjunction with `INVOKEAI_ROOT` can also be used to initialize a runtime directory

 ```
 command:
   - invokeai-configure
   - --yes
 ```

-
-#### Run in CLI mode
-
-This container starts InvokeAI in web mode by default.
-
-Override the `command` and run `docker compose:
-
-```
-command:
-  - invoke
-```
-
-Then attach to the container from another terminal:
-
-```
-$ docker attach $(docker compose ps invokeai -q)
-
-invoke>
-```
-
-Enjoy using the `invoke>` prompt. To detach from the container, type `Ctrl+P` followed by `Ctrl+Q` (this is the escape sequence).
+Or install models:
+
+```
+command:
+  - invokeai-model-install
+```
\ No newline at end of file

From 72a11ec4bcd5923dd9aebd48a48412fd54020c13 Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Mon, 26 Jun 2023 02:15:57 -0400
Subject: [PATCH 07/17] (docker) use docker-compose in deprecated build
 scripts

temporarily retaining the build scripts for backwards compatibility
---
 docker/build.sh | 49 +-------------------------------------------
 docker/env.sh   | 54 -------------------------------------------------
 docker/run.sh   | 37 ++-------------------------------
 3 files changed, 3 insertions(+), 137 deletions(-)
 delete mode 100644 docker/env.sh

diff --git a/docker/build.sh b/docker/build.sh
index 8bfb9a9ddc..abca29f684 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -1,51 +1,4 @@
 #!/usr/bin/env bash
 set -e

-# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
-# e.g. CONTAINER_FLAVOR=cpu ./build.sh
-# Possible Values are:
-# - cpu
-# - cuda
-# - rocm
-# Don't forget to also set it when executing run.sh
-# if it is not set, the script will try to detect the flavor by itself.
-#
-# Doc can be found here:
-# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
-
-SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
-cd "$SCRIPTDIR" || exit 1
-
-source ./env.sh
-
-DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
-
-# print the settings
-echo -e "You are using these values:\n"
-echo -e "Dockerfile:\t\t${DOCKERFILE}"
-echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
-echo -e "Volumename:\t\t${VOLUMENAME}"
-echo -e "Platform:\t\t${PLATFORM}"
-echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
-echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
-echo -e "Container Tag:\t\t${CONTAINER_TAG}"
-echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
-echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
-
-# Create docker volume
-if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
-    echo -e "Volume already exists\n"
-else
-    echo -n "creating docker volume "
-    docker volume create "${VOLUMENAME}"
-fi
-
-# Build Container
-docker build \
-    --platform="${PLATFORM:-linux/amd64}" \
-    --tag="${CONTAINER_IMAGE:-invokeai}" \
-    ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
-    ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
-    ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
-    --file="${DOCKERFILE}" \
-    ..
+docker-compose build

diff --git a/docker/env.sh b/docker/env.sh
deleted file mode 100644
index ee3b54f5f6..0000000000
--- a/docker/env.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-
-# This file is used to set environment variables for the build.sh and run.sh scripts.
-
-# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
-if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
-
-    # Activate virtual environment if not already activated and exists
-    if [[ -z $VIRTUAL_ENV ]]; then
-        [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
-            && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
-            && echo "Activated virtual environment: $VIRTUAL_ENV"
-    fi
-
-    # Decide which container flavor to build if not specified
-    if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
-        # Check for CUDA and ROCm
-        CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
-        ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
-        if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
-            CONTAINER_FLAVOR="cuda"
-        elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
-            CONTAINER_FLAVOR="rocm"
-        else
-            CONTAINER_FLAVOR="cpu"
-        fi
-    fi
-
-    # Set PIP_EXTRA_INDEX_URL based on container flavor
-    if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
-        PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
-    elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
-        PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
-    # elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
-    #     PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
-    fi
-fi
-
-# Variables shared by build.sh and run.sh
-REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
-REPOSITORY_NAME="${REPOSITORY_NAME,,}"
-VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
-ARCH="${ARCH-$(uname -m)}"
-PLATFORM="${PLATFORM-linux/${ARCH}}"
-INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
-CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
-CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
-CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
-CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
-CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
-CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
-
-# enable docker buildkit
-export DOCKER_BUILDKIT=1

diff --git a/docker/run.sh b/docker/run.sh
index d685788146..bb764ec022 100755
--- a/docker/run.sh
+++ b/docker/run.sh
@@ -1,41 +1,8 @@
 #!/usr/bin/env bash
 set -e

-# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
-
 SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

-source ./env.sh
-
-# Create outputs directory if it does not exist
-[[ -d ./outputs ]] || mkdir ./outputs
-
-echo -e "You are using these values:\n"
-echo -e "Volumename:\t${VOLUMENAME}"
-echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
-echo -e "local Models:\t${MODELSPATH:-unset}\n"
-
-docker run \
-    --interactive \
-    --tty \
-    --rm \
-    --platform="${PLATFORM}" \
-    --name="${REPOSITORY_NAME}" \
-    --hostname="${REPOSITORY_NAME}" \
-    --mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
-    --mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
-    ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
-    ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
-    --publish=9090:9090 \
-    --cap-add=sys_nice \
-    ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
-    "${CONTAINER_IMAGE}" ${@:+$@}
-
-echo -e "\nCleaning trash folder ..."
-for f in outputs/.Trash*; do
-    if [ -e "$f" ]; then
-        rm -Rf "$f"
-        break
-    fi
-done
+docker-compose up --build -d
+docker-compose logs -f

From 6fbd6439480095f8412e761dae7a56adaf56e457 Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Mon, 26 Jun 2023 15:43:42 -0400
Subject: [PATCH 08/17] (docker) tidy up dockerignore

---
 .dockerignore | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/.dockerignore b/.dockerignore
index dc9b1ffaa5..907159c12e 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,8 +1,5 @@
 *
 !invokeai
-!ldm
 !pyproject.toml
 !docker/docker-entrypoint.sh
 !LICENSE
-
-**/__pycache__

From 3b1eeda4d40003669ceabc579a3117a35fa0c3ea Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Mon, 26 Jun 2023 18:01:22 -0400
Subject: [PATCH 09/17] (docker) only install default models when running the
 container against a new runtime directory

---
 docker/docker-entrypoint.sh | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docker/docker-entrypoint.sh b/docker/docker-entrypoint.sh
index cdf3026c87..d45ed3d7ef 100755
--- a/docker/docker-entrypoint.sh
+++ b/docker/docker-entrypoint.sh
@@ -25,13 +25,13 @@ usermod -u ${USER_ID} ${USER} 1>/dev/null
 configure() {
     # Configure the runtime directory
     if [[ -f ${INVOKEAI_ROOT}/invokeai.yaml ]]; then
-        echo "${INVOKEAI_ROOT}/invokeai.yaml found."
-        echo "To reconfigure InvokeAI, please delete it."
-        echo "==========================================="
+        echo "${INVOKEAI_ROOT}/invokeai.yaml exists. InvokeAI is already configured."
+        echo "To reconfigure InvokeAI, delete the above file."
+        echo "======================================================================"
     else
         mkdir -p ${INVOKEAI_ROOT}
         chown --recursive ${USER} ${INVOKEAI_ROOT}
-        gosu ${USER} invokeai-configure --yes
+        gosu ${USER} invokeai-configure --yes --default_only
     fi
 }

From 674f42ba9af16898cc7834084aedd9ec863dbbe7 Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Tue, 27 Jun 2023 00:33:58 -0400
Subject: [PATCH 10/17] Pass env vars as build-args, ensure node modules isn't
 getting passed in

---
 .dockerignore             |  2 ++
 docker/Dockerfile         |  2 +-
 docker/build.sh           |  9 ++++++++-
 docker/docker-compose.yml |  2 ++
 4 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/.dockerignore b/.dockerignore
index 907159c12e..598a11e1eb 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -3,3 +3,5 @@
 !pyproject.toml
 !docker/docker-entrypoint.sh
 !LICENSE
+
+node_modules
\ No newline at end of file

diff --git a/docker/Dockerfile b/docker/Dockerfile
index df2ac4bb0f..eff558563f 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -21,7 +21,7 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH"
 ARG TORCH_VERSION=2.0.1
 ARG TORCHVISION_VERSION=0.15.2
 ARG GPU_DRIVER=cuda
-ARG TARGETPLATFORM
+ARG TARGETPLATFORM="linux/amd64"
 # unused but available
 ARG BUILDPLATFORM

diff --git a/docker/build.sh b/docker/build.sh
index abca29f684..41745f02bb 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -1,4 +1,11 @@
 #!/usr/bin/env bash
 set -e

-docker-compose build
+build_args=""
+
+[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=/ {print "--build-arg " $0 " "}' .env)
+
+echo "docker-compose build args:"
+echo $build_args
+
+docker-compose build $build_args

diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index b3e101430d..85deac428e 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -34,6 +34,8 @@ services:
     volumes:
       - ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
       - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
+      # - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
+      # - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
     tty: true
     stdin_open: true

From a674fff17a0081adc579ba7ea686ff5729ab9a40 Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Tue, 27 Jun 2023 11:25:41 -0400
Subject: [PATCH 11/17] Update dockerignore, set venv to 3.10, pass cache to
 yarn vite buidl

---
 .dockerignore     | 4 +++-
 docker/Dockerfile | 5 +++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/.dockerignore b/.dockerignore
index 598a11e1eb..3f674f978d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -4,4 +4,6 @@
 !docker/docker-entrypoint.sh
 !LICENSE

-node_modules
\ No newline at end of file
+**/node_modules
+**/__pycache__
+**/*.egg-info
\ No newline at end of file

diff --git a/docker/Dockerfile b/docker/Dockerfile
index eff558563f..f3d7e4a59f 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -10,7 +10,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
     --mount=type=cache,target=/var/lib/apt,sharing=locked \
     apt update && apt-get install -y \
         git \
-        python3-venv \
+        python3.10-venv \
         python3-pip \
         build-essential

@@ -64,7 +64,8 @@
 WORKDIR /build
 COPY invokeai/frontend/web/ ./
 RUN --mount=type=cache,target=node_modules \
     npm install --include dev
-RUN yarn vite build
+RUN --mount=type=cache,target=node_modules \
+    yarn vite build

 #### Runtime stage ---------------------------------------

From c910376bb635f256bfbe859e488374d7be93e8b0 Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Tue, 27 Jun 2023 11:53:48 -0400
Subject: [PATCH 12/17] Don't use .env file lines where = is at the end of the
 line

---
 docker/build.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/build.sh b/docker/build.sh
index 41745f02bb..db25439840 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -3,7 +3,7 @@ set -e

 build_args=""

-[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=/ {print "--build-arg " $0 " "}' .env)
+[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env)

 echo "docker-compose build args:"
 echo $build_args

From 7111db2e0dfd570543b4b8e33b8b260dfbddd4ce Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Wed, 28 Jun 2023 14:00:06 -0400
Subject: [PATCH 13/17] (ci) fix container build workflow

---
 .github/workflows/build-container.yml | 76 ++++++++++++---------------
 1 file changed, 35 insertions(+), 41 deletions(-)

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index 23d7c82fe3..54d66ce961 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -3,17 +3,16 @@ on:
   push:
     branches:
       - 'main'
-      - 'update/ci/docker/*'
-      - 'update/docker/*'
-      - 'dev/ci/docker/*'
-      - 'dev/docker/*'
+      - 'ebr/container-fixes'
     paths:
       - 'pyproject.toml'
       - '.dockerignore'
       - 'invokeai/**'
       - 'docker/Dockerfile'
+      - 'docker/docker-entrypoint.sh'
+      - 'workflows/build-container.yml'
     tags:
-      - 'v*.*.*'
+      - 'v*'
   workflow_dispatch:

 permissions:
     strategy:
       fail-fast: false
       matrix:
-        flavor:
-          - rocm
-          - cuda
-          - cpu
-        include:
-          - flavor: rocm
-            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-          - flavor: cuda
-            pip-extra-index-url: ''
-          - flavor: cpu
-            pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
+        gpu-driver:
+          - cuda
+          - cpu
+          - rocm
     runs-on: ubuntu-latest
-    name: ${{ matrix.flavor }}
+    name: ${{ matrix.gpu-driver }}
     env:
-      PLATFORMS: 'linux/amd64,linux/arm64'
-      DOCKERFILE: 'docker/Dockerfile'
+      # torch/arm64 does not support GPU currently, so arm64 builds
+      # would not be GPU-accelerated.
+      # re-enable arm64 if there is sufficient demand.
+      # PLATFORMS: 'linux/amd64,linux/arm64'
+      PLATFORMS: 'linux/amd64'
     steps:
       - name: Checkout
         uses: actions/checkout@v3

       - name: Docker meta
         id: meta
         uses: docker/metadata-action@v4
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           images: |
             ghcr.io/${{ github.repository }}
             ${{ env.DOCKERHUB_REPOSITORY }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
             type=pep440,pattern={{version}}
             type=pep440,pattern={{major}}.{{minor}}
             type=pep440,pattern={{major}}
             type=sha,enable=true,prefix=sha-,format=short
           flavor: |
             latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
             suffix=-${{ matrix.gpu-driver }},onlatest=false

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2

           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}

-      - name: Login to Docker Hub
-        if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      # - name: Login to Docker Hub
+      #   if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
+      #   uses: docker/login-action@v2
+      #   with:
+      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
+      #     password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build container
         id: docker_build
         uses: docker/build-push-action@v4
         with:
           context: .
-          file: ${{ env.DOCKERFILE }}
+          file: docker/Dockerfile
           platforms: ${{ env.PLATFORMS }}
           push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
           cache-from: |
-            type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
-            type=gha,scope=main-${{ matrix.flavor }}
-          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
+            type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
+            type=gha,scope=main-${{ matrix.gpu-driver }}
+          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}

-      - name: Docker Hub Description
-        if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
-        uses: peter-evans/dockerhub-description@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-          repository: ${{ vars.DOCKERHUB_REPOSITORY }}
-          short-description: ${{ github.event.repository.description }}
+      # - name: Docker Hub Description
+      #   if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
+      #   uses: peter-evans/dockerhub-description@v3
+      #   with:
+      #     username: ${{ secrets.DOCKERHUB_USERNAME }}
+      #     password: ${{ secrets.DOCKERHUB_TOKEN }}
+      #     repository: ${{ vars.DOCKERHUB_REPOSITORY }}
+      #     short-description: ${{ github.event.repository.description }}

From d6da7ad92223e9f0dcb8b8e6b335f5751471b490 Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Tue, 11 Jul 2023 20:57:33 -0400
Subject: [PATCH 14/17] (docker) dockerfile fixes including PR feedback

When previously using base Debian-ish images, the Invoke image failed
to find CUDA drivers on some RHEL-ish distributions
---
 docker/Dockerfile | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/docker/Dockerfile b/docker/Dockerfile
index f3d7e4a59f..8da083edf8 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -2,7 +2,7 @@

 ## Builder stage

-FROM ubuntu:22.04 AS builder
+FROM library/ubuntu:22.04 AS builder

 ARG DEBIAN_FRONTEND=noninteractive
 RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
@@ -37,7 +37,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.2"; \
     else \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu117"; \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \
     fi &&\
     pip install $extra_index_url_arg \
         torch==$TORCH_VERSION \
@@ -59,18 +59,18 @@

 # #### Build the Web UI ------------------------------------

-FROM node:18 as web-builder
+FROM node:18 AS web-builder
 WORKDIR /build
 COPY invokeai/frontend/web/ ./
-RUN --mount=type=cache,target=node_modules \
+RUN --mount=type=cache,target=/usr/lib/node_modules \
     npm install --include dev
-RUN --mount=type=cache,target=node_modules \
+RUN --mount=type=cache,target=/usr/lib/node_modules \
     yarn vite build

 #### Runtime stage ---------------------------------------

-FROM library/ubuntu:22.04 as runtime
+FROM library/ubuntu:22.04 AS runtime

 ARG DEBIAN_FRONTEND=noninteractive
 ENV PYTHONUNBUFFERED=1
 ENV PYTHONDONTWRITEBYTECODE=1

From a9d7ce8ca4edb360dd57ca01b06dd1498cbe39d9 Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Tue, 11 Jul 2023 22:11:45 -0400
Subject: [PATCH 15/17] (ci) free up disk space on GHA runners

---
 .github/workflows/build-container.yml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index 54d66ce961..58e56eb1ee 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -38,6 +38,14 @@ jobs:
       # PLATFORMS: 'linux/amd64,linux/arm64'
       PLATFORMS: 'linux/amd64'
     steps:
+      - name: Free up more disk space on the runner
+        # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+          sudo swapoff /mnt/swapfile
+          sudo rm -rf /mnt/swapfile
+
       - name: Checkout
         uses: actions/checkout@v3

From 0b0efa82e9057307d6fd7884dd54f65ebc3ccb79 Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Tue, 11 Jul 2023 23:01:29 -0400
Subject: [PATCH 16/17] (docker) ROCm support fixes - contributed by @Rubonnek

---
 docker/Dockerfile | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/docker/Dockerfile b/docker/Dockerfile
index 8da083edf8..e158c681a4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -35,7 +35,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.2"; \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
     else \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \
     fi &&\
@@ -90,7 +90,8 @@ RUN apt update && apt install -y --no-install-recommends \
     python3-venv \
     python3-pip \
     build-essential \
-    libopencv-dev &&\
+    libopencv-dev \
+    libstdc++-10-dev &&\
     apt-get clean && apt-get autoclean

 # globally add magic-wormhole
@@ -108,6 +109,11 @@
 COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
 COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
 COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist

+# Link amdgpu.ids for ROCm builds
+# contributed by https://github.com/Rubonnek
+RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
+    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
+
 WORKDIR ${INVOKEAI_SRC}

From 212156cb15ff3d16caf37ab67406e24fab2e5e7d Mon Sep 17 00:00:00 2001
From: Eugene Brodsky
Date: Wed, 12 Jul 2023 12:07:50 -0400
Subject: [PATCH 17/17] (ci) remove testing branch

---
 .github/workflows/build-container.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index 58e56eb1ee..60eba4a297 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -3,7 +3,6 @@ on:
   push:
     branches:
       - 'main'
-      - 'ebr/container-fixes'
     paths:
       - 'pyproject.toml'
       - '.dockerignore'
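
---

Taken together, patches 04 through 17 move the container workflow onto docker compose: `docker/build.sh` and `docker/run.sh` become thin wrappers around `docker-compose`, and configuration flows through `docker/.env`. A minimal end-to-end session with this series applied might look like the sketch below; it is only an illustration, and it assumes a host already set up for NVIDIA GPUs and an `INVOKEAI_ROOT` path of your choosing:

```bash
# from a checkout with this patch series applied
cd docker
cp .env.sample .env      # then edit .env: set INVOKEAI_ROOT to an absolute path
docker compose build     # or ./build.sh, which now wraps docker-compose
docker compose up        # docker-entrypoint.sh configures the runtime dir on first start
```

On the first start, `docker-entrypoint.sh` runs `invokeai-configure --yes --default_only` (per patch 09) against the mounted `INVOKEAI_ROOT`; subsequent starts skip configuration because `invokeai.yaml` already exists in that directory.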