diff --git a/.dockerignore b/.dockerignore
index 5df924ddee..d64ce825dc 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,19 +1,18 @@
 *
+!assets/caution.png
 !backend
-!environments-and-requirements
-!frontend
+!frontend/dist
 !ldm
-!main.py
+!pyproject.toml
+!README.md
 !scripts
-!server
-!static
-!setup.py
 
 # Guard against pulling in any models that might exist in the directory tree
-**/*.pt*
+**.pt*
 
 # unignore configs, but only ignore the custom models.yaml, in case it exists
 !configs
 configs/models.yaml
+configs/models.yaml.orig
 
 **/__pycache__
diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index e559a25087..675f6cf8f1 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -3,63 +3,59 @@ on:
   push:
     branches:
       - 'main'
+    tags:
+      - 'v*.*.*'
 
 jobs:
   docker:
     strategy:
       fail-fast: false
       matrix:
-        registry:
-          - ghcr.io
         flavor:
           - amd
           - cuda
-          # - cloud
         include:
           - flavor: amd
-            pip-requirements: requirements-lin-amd.txt
+            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
             dockerfile: docker-build/Dockerfile
             platforms: linux/amd64,linux/arm64
           - flavor: cuda
-            pip-requirements: requirements-lin-cuda.txt
+            pip-extra-index-url: ''
             dockerfile: docker-build/Dockerfile
             platforms: linux/amd64,linux/arm64
-          # - flavor: cloud
-          #   pip-requirements: requirements-lin-cuda.txt
-          #   dockerfile: docker-build/Dockerfile.cloud
-          #   platforms: linux/amd64
     runs-on: ubuntu-latest
     name: ${{ matrix.flavor }}
     steps:
       - name: Checkout
         uses: actions/checkout@v3
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-
       - name: Docker meta
         id: meta
         uses: docker/metadata-action@v4
         with:
-          images: ${{ matrix.registry }}/${{ github.repository }}-${{ matrix.flavor }}
+          images: ghcr.io/${{ github.repository }}-${{ matrix.flavor }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
             type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
             type=sha
           flavor: |
             latest=true
 
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
 
-      - if: github.event_name != 'pull_request'
-        name: Docker login
+      - name: Login to GitHub Container Registry
+        if: github.event_name != 'pull_request'
        uses: docker/login-action@v2
        with:
-          registry: ${{ matrix.registry }}
-          username: ${{ github.actor }}
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
 
       - name: Build container
@@ -71,4 +67,6 @@ jobs:
         push: ${{ github.event_name != 'pull_request' }}
         tags: ${{ steps.meta.outputs.tags }}
         labels: ${{ steps.meta.outputs.labels }}
-        build-args: pip_requirements=${{ matrix.pip-requirements }}
+        build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
+        # cache-from: type=gha
+        # cache-to: type=gha,mode=max
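For reference, the `amd` matrix entry above corresponds to roughly this local build; a sketch only, run from the repository root, with an illustrative image tag:

    docker build \
        --build-arg PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm5.2" \
        --file docker-build/Dockerfile \
        --tag invokeai-amd:local \
        .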
diff --git a/docker-build/Dockerfile b/docker-build/Dockerfile
index 4aaf0c5d07..909f806915 100644
--- a/docker-build/Dockerfile
+++ b/docker-build/Dockerfile
@@ -1,59 +1,71 @@
-FROM python:3.10-slim AS builder
+# syntax=docker/dockerfile:1
+FROM python:3.9-slim AS python-base
 
 # use bash
 SHELL [ "/bin/bash", "-c" ]
 
 # Install necesarry packages
-RUN apt-get update \
+RUN \
+  --mount=type=cache,target=/var/cache/apt,sharing=locked \
+  --mount=type=cache,target=/var/lib/apt,sharing=locked \
+  apt-get update \
+  && apt-get install -y \
+    --no-install-recommends \
+    libgl1-mesa-glx=20.3.* \
+    libglib2.0-0=2.66.* \
+    libopencv-dev=4.5.* \
+  && apt-get clean \
+  && rm -rf /var/lib/apt/lists/*
+
+ARG APPDIR=/usr/src/app
+ENV APPDIR ${APPDIR}
+WORKDIR ${APPDIR}
+
+FROM python-base AS builder
+
+RUN \
+  --mount=type=cache,target=/var/cache/apt,sharing=locked \
+  --mount=type=cache,target=/var/lib/apt,sharing=locked \
+  apt-get update \
   && apt-get install -y \
     --no-install-recommends \
     gcc=4:10.2.* \
-    libgl1-mesa-glx=20.3.* \
-    libglib2.0-0=2.66.* \
     python3-dev=3.9.* \
   && apt-get clean \
   && rm -rf /var/lib/apt/lists/*
 
-# set WORKDIR, PATH and copy sources
-ARG APPDIR=/usr/src/app
-WORKDIR ${APPDIR}
-ENV PATH ${APPDIR}/.venv/bin:$PATH
-ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
-COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./
+# copy sources
+COPY --link . .
+ARG PIP_EXTRA_INDEX_URL
+ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
 
 # install requirements
-RUN python3 -m venv .venv \
-  && pip install \
-    --upgrade \
+RUN python3 -m venv invokeai \
+  && ${APPDIR}/invokeai/bin/pip \
+    install \
     --no-cache-dir \
-    'wheel>=0.38.4' \
-  && pip install \
-    --no-cache-dir \
-    -r ${PIP_REQUIREMENTS}
+    --use-pep517 \
+    .
 
-FROM python:3.10-slim AS runtime
+FROM python-base AS runtime
 
 # setup environment
-ARG APPDIR=/usr/src/app
-WORKDIR ${APPDIR}
-COPY --from=builder ${APPDIR} .
-ENV \
-  PATH=${APPDIR}/.venv/bin:$PATH \
-  INVOKEAI_ROOT=/data \
-  INVOKE_MODEL_RECONFIGURE=--yes
+COPY --link . .
+COPY --from=builder ${APPDIR}/invokeai ${APPDIR}/invokeai
+ENV PATH=${APPDIR}/invokeai/bin:$PATH
+ENV INVOKEAI_ROOT=/data
+ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
 
-# Install necesarry packages
-RUN apt-get update \
+# build patchmatch
+RUN \
+  --mount=type=cache,target=/var/cache/apt,sharing=locked \
+  --mount=type=cache,target=/var/lib/apt,sharing=locked \
+  apt-get update \
   && apt-get install -y \
     --no-install-recommends \
     build-essential=12.9 \
-    libgl1-mesa-glx=20.3.* \
-    libglib2.0-0=2.66.* \
-    libopencv-dev=4.5.* \
-  && ln -sf \
-    /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
-    /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
-  && python3 -c "from patchmatch import patch_match" \
+  && PYTHONDONTWRITEBYTECODE=1 \
+    python3 -c "from patchmatch import patch_match" \
   && apt-get remove -y \
     --autoremove \
     build-essential \
@@ -61,5 +73,6 @@ RUN apt-get update \
   && rm -rf /var/lib/apt/lists/*
 
 # set Entrypoint and default CMD
-ENTRYPOINT [ "python3", "scripts/invoke.py" ]
+ENTRYPOINT [ "invoke" ]
 CMD [ "--web", "--host=0.0.0.0" ]
+VOLUME [ "/data" ]
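Since the builder stage installs the project into the `${APPDIR}/invokeai` virtualenv and the runtime stage puts that venv's `bin` on PATH, the new `ENTRYPOINT [ "invoke" ]` resolves to the console script installed from pyproject.toml. A quick smoke test, assuming an image named the way env.sh would tag a main-branch cuda build:

    # arguments replace the default CMD "--web --host=0.0.0.0"
    docker run --rm invokeai-cuda:main --help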
"arch:\t\t ${ARCH}" echo -e "Platform:\t ${PLATFORM}" echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n" if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then - echo -e "Volume already exists\n" + echo -e "Volume already exists\n" else - echo -n "createing docker volume " - docker volume create "${VOLUMENAME}" + echo -n "createing docker volume " + docker volume create "${VOLUMENAME}" fi # Build Container docker build \ - --platform="${PLATFORM}" \ - --tag="${INVOKEAI_TAG}" \ - --build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \ - --file="${DOCKERFILE}" \ - . + --platform="${PLATFORM}" \ + --tag="${INVOKEAI_TAG}" \ + ${PIP_EXTRA_INDEX_URL:+--build-arg=PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL}"} \ + --file="${DOCKERFILE}" \ + .. diff --git a/docker-build/env.sh b/docker-build/env.sh index fd1e6bcab3..6d2f71f9ec 100644 --- a/docker-build/env.sh +++ b/docker-build/env.sh @@ -7,4 +7,4 @@ ARCH=${ARCH:-$(uname -m)} PLATFORM=${PLATFORM:-Linux/${ARCH}} CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda} INVOKEAI_BRANCH=$(git branch --show) -INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH/\//-}} +INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH##*/}} diff --git a/docker-build/run.sh b/docker-build/run.sh index b7089fccd2..23d85d9790 100755 --- a/docker-build/run.sh +++ b/docker-build/run.sh @@ -4,17 +4,14 @@ set -e # How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container # IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!! -source ./docker-build/env.sh \ - || echo "please run from repository root" \ - || exit 1 +cd "$(dirname "$0")" || exit 1 -# check if HUGGINGFACE_TOKEN is available -# You must have accepted the terms of use for required models -HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN} +source ./env.sh echo -e "You are using these values:\n" -echo -e "Volumename:\t ${VOLUMENAME}" -echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n" +echo -e "Volumename:\t${VOLUMENAME}" +echo -e "Invokeai_tag:\t${INVOKEAI_TAG}" +echo -e "local Models:\t${MODELSPATH:-unset}\n" docker run \ --interactive \ @@ -23,8 +20,10 @@ docker run \ --platform="$PLATFORM" \ --name="${REPOSITORY_NAME,,}" \ --hostname="${REPOSITORY_NAME,,}" \ - --mount="source=$VOLUMENAME,target=/data" \ - --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \ + --mount=source="$VOLUMENAME",target=/data \ + ${MODELSPATH:+-u "$(id -u):$(id -g)"} \ + ${MODELSPATH:+--mount=type=bind,source=${MODELSPATH},target=/data/models} \ + ${HUGGING_FACE_HUB_TOKEN:+--env=HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}} \ --publish=9090:9090 \ --cap-add=sys_nice \ ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \ diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 4a7b56eb5c..96f8094643 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -21,6 +21,38 @@ import ldm.invoke # global used in multiple functions (fix) infile = None +def report_model_error(opt:Namespace, e:Exception): + print(f'** An error occurred while attempting to initialize the model: "{str(e)}"') + print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.') + if not str("--yes") in os.environ['INVOKE_MODEL_RECONFIGURE'].split(): + response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? 
diff --git a/docker-build/run.sh b/docker-build/run.sh
index b7089fccd2..23d85d9790 100755
--- a/docker-build/run.sh
+++ b/docker-build/run.sh
@@ -4,17 +4,14 @@ set -e
 # How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
 # IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
 
-source ./docker-build/env.sh \
-  || echo "please run from repository root" \
-  || exit 1
+cd "$(dirname "$0")" || exit 1
 
-# check if HUGGINGFACE_TOKEN is available
-# You must have accepted the terms of use for required models
-HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}
+source ./env.sh
 
 echo -e "You are using these values:\n"
-echo -e "Volumename:\t ${VOLUMENAME}"
-echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
+echo -e "Volumename:\t${VOLUMENAME}"
+echo -e "Invokeai_tag:\t${INVOKEAI_TAG}"
+echo -e "local Models:\t${MODELSPATH:-unset}\n"
 
 docker run \
   --interactive \
@@ -23,8 +20,10 @@ docker run \
   --platform="$PLATFORM" \
   --name="${REPOSITORY_NAME,,}" \
   --hostname="${REPOSITORY_NAME,,}" \
-  --mount="source=$VOLUMENAME,target=/data" \
-  --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
+  --mount=source="$VOLUMENAME",target=/data \
+  ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
+  ${MODELSPATH:+--mount=type=bind,source=${MODELSPATH},target=/data/models} \
+  ${HUGGING_FACE_HUB_TOKEN:+--env=HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}} \
   --publish=9090:9090 \
   --cap-add=sys_nice \
   ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
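Both `MODELSPATH` and `HUGGING_FACE_HUB_TOKEN` are now optional: each flag is only emitted when its variable is set, via bash's `${var:+...}` expansion. A typical invocation might look like this (token and path are placeholders):

    HUGGING_FACE_HUB_TOKEN="hf_..." \
    MODELSPATH="$HOME/invokeai/models" \
        ./docker-build/run.sh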
diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py
index 4a7b56eb5c..96f8094643 100644
--- a/ldm/invoke/CLI.py
+++ b/ldm/invoke/CLI.py
@@ -21,6 +21,38 @@ import ldm.invoke
 # global used in multiple functions (fix)
 infile = None
 
+def report_model_error(opt:Namespace, e:Exception):
+    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
+    print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
+    if '--yes' not in os.environ.get('INVOKE_MODEL_RECONFIGURE', '').split():
+        response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
+        if response.startswith(('n','N')):
+            return
+
+    print('configure_invokeai is launching....\n')
+
+    # Match arguments that were set on the CLI
+    # only the arguments accepted by the configuration script are parsed
+    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
+    config = ["--config", opt.conf] if opt.conf is not None else []
+    if os.getenv('INVOKE_MODEL_RECONFIGURE'):
+        yes_to_all = os.environ['INVOKE_MODEL_RECONFIGURE'].split()
+    else:
+        yes_to_all = None
+    previous_args = sys.argv
+    sys.argv = [ 'configure_invokeai' ]
+    sys.argv.extend(root_dir)
+    sys.argv.extend(config)
+    if yes_to_all is not None:
+        for argv in yes_to_all:
+            sys.argv.append(argv)
+
+    import ldm.invoke.configure_invokeai as configure_invokeai
+    configure_invokeai.main()
+    print('** InvokeAI will now restart')
+    sys.argv = previous_args
+    sys.exit(main()) # would rather do a os.exec(), but doesn't exist?
+
 def main():
     """Initialize command-line parsers and the diffusion model"""
     global infile
@@ -50,10 +82,11 @@ def main():
 
     if not args.conf:
         if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
-            print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
-            print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
-            print('** This script will now exit.')
-            sys.exit(-1)
+            report_model_error(opt, FileNotFoundError(f"The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found."))
+            # print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
+            # print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
+            # print('** This script will now exit.')
+            # sys.exit(-1)
 
     print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -574,7 +607,7 @@ def import_model(model_path:str, gen, opt, completer):
     if model_path.startswith(('http:','https:','ftp:')):
         model_name = import_ckpt_model(model_path, gen, opt, completer)
     elif os.path.exists(model_path) and model_path.endswith(('.ckpt','.safetensors')) and os.path.isfile(model_path):
-        model_name = import_ckpt_model(model_path, gen, opt, completer) 
+        model_name = import_ckpt_model(model_path, gen, opt, completer)
     elif re.match('^[\w.+-]+/[\w.+-]+$',model_path):
         model_name = import_diffuser_model(model_path, gen, opt, completer)
     elif os.path.isdir(model_path):
@@ -743,7 +776,7 @@ def del_config(model_name:str, gen, opt, completer):
     if input(f'Remove {model_name} from the list of models known to InvokeAI? [y] ').strip().startswith(('n','N')):
         return
-    
+
     delete_completely = input('Completely remove the model file or directory from disk? [n] ').startswith(('y','Y'))
     gen.model_manager.del_model(model_name,delete_files=delete_completely)
     gen.model_manager.commit(opt.conf)
 
@@ -1097,34 +1130,6 @@ def write_commands(opt, file_path:str, outfilepath:str):
             f.write('\n'.join(commands))
     print(f'>> File {outfilepath} with commands created')
 
-def report_model_error(opt:Namespace, e:Exception):
-    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
-    print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
-    response = input('Do you want to run configure_invokeai.py to select and/or reinstall models? [y] ')
-    if response.startswith(('n','N')):
-        return
-
-    print('configure_invokeai is launching....\n')
-
-    # Match arguments that were set on the CLI
-    # only the arguments accepted by the configuration script are parsed
-    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
-    config = ["--config", opt.conf] if opt.conf is not None else []
-    yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
-    previous_args = sys.argv
-    sys.argv = [ 'configure_invokeai' ]
-    sys.argv.extend(root_dir)
-    sys.argv.extend(config)
-    if yes_to_all is not None:
-        sys.argv.append(yes_to_all)
-
-    import ldm.invoke.configure_invokeai as configure_invokeai
-    configure_invokeai.main()
-    print('** InvokeAI will now restart')
-    sys.argv = previous_args
-    main() # would rather do a os.exec(), but doesn't exist?
-    sys.exit(0)
-
 def check_internet()->bool:
     '''
     Return true if the internet is reachable.
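Because `report_model_error()` now splits `INVOKE_MODEL_RECONFIGURE` on whitespace, the variable can carry several configure_invokeai flags at once, which is how the Dockerfile's `--yes --default_only` default reaches the script. For example, forcing a non-interactive reconfiguration outside the container:

    # model (re)configuration runs without prompting, as it does on first container start
    INVOKE_MODEL_RECONFIGURE="--yes --default_only" invoke --web --host=0.0.0.0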