Merge branch 'main' into pr/2561
Commit fb35fe1a41
.dockerignore

@@ -3,21 +3,23 @@
!invokeai
!ldm
!pyproject.toml
!README.md

# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt

# ignore frontend but whitelist dist
invokeai/frontend/**
!invokeai/frontend/dist
invokeai/frontend/
!invokeai/frontend/dist/

# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets
!invokeai/assets/web
invokeai/assets/
!invokeai/assets/web/

# ignore python cache
**/__pycache__
# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]
**/*.egg-info

# Distribution / packaging
*.egg-info/
*.egg
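A note on the whitelist pattern above: ignore rules are evaluated in order, and a later `!pattern` re-includes paths that an earlier pattern excluded; that is how `invokeai/frontend/dist/` stays in the build context while the rest of the frontend tree is dropped. A minimal sketch of that behavior, assuming the third-party `pathspec` package (its gitwildmatch dialect approximates, but does not exactly match, .dockerignore semantics):

```python
# Sketch: later negation patterns re-include previously excluded paths.
# Assumes `pip install pathspec`; gitwildmatch only approximates .dockerignore.
import pathspec

rules = [
    "invokeai/frontend/",        # exclude the whole frontend tree
    "!invokeai/frontend/dist/",  # ...but re-include the built assets
]
spec = pathspec.PathSpec.from_lines("gitwildmatch", rules)

# match_file() == True means the path is excluded from the build context
print(spec.match_file("invokeai/frontend/src/App.tsx"))     # True  (excluded)
print(spec.match_file("invokeai/frontend/dist/index.html")) # False (kept)
```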
56 .github/workflows/build-container.yml vendored
@@ -3,7 +3,8 @@ on:
  push:
    branches:
      - 'main'
      - 'update/ci/*'
      - 'update/ci/docker/*'
      - 'update/docker/*'
    tags:
      - 'v*.*.*'

@@ -20,18 +21,15 @@ jobs:
        include:
          - flavor: amd
            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
            dockerfile: docker/Dockerfile
            platforms: linux/amd64,linux/arm64
          - flavor: cuda
            pip-extra-index-url: ''
            dockerfile: docker/Dockerfile
            platforms: linux/amd64,linux/arm64
          - flavor: cpu
            pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
            dockerfile: docker/Dockerfile
            platforms: linux/amd64,linux/arm64
    runs-on: ubuntu-latest
    name: ${{ matrix.flavor }}
    env:
      PLATFORMS: 'linux/amd64,linux/arm64'
      DOCKERFILE: 'docker/Dockerfile'
    steps:
      - name: Checkout
        uses: actions/checkout@v3

@@ -41,7 +39,9 @@ jobs:
        uses: docker/metadata-action@v4
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          images: ghcr.io/${{ github.repository }}
          images: |
            ghcr.io/${{ github.repository }}
            ${{ vars.DOCKERHUB_REPOSITORY }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag

@@ -52,13 +52,14 @@ jobs:
          flavor: |
            latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
            suffix=-${{ matrix.flavor }},onlatest=false

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          platforms: ${{ matrix.platforms }}
          platforms: ${{ env.PLATFORMS }}

      - name: Login to GitHub Container Registry
        if: github.event_name != 'pull_request'

@@ -68,25 +69,34 @@ jobs:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub
        if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build container
        id: docker_build
        uses: docker/build-push-action@v4
        with:
          context: .
          file: ${{ matrix.dockerfile }}
          platforms: ${{ matrix.platforms }}
          push: ${{ github.event_name != 'pull_request' }}
          file: ${{ env.DOCKERFILE }}
          platforms: ${{ env.PLATFORMS }}
          push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          cache-from: |
            type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
            type=gha,scope=main-${{ matrix.flavor }}
          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}

      - name: Output image, digest and metadata to summary
        run: |
          {
            echo imageid: "${{ steps.docker_build.outputs.imageid }}"
            echo digest: "${{ steps.docker_build.outputs.digest }}"
            echo labels: "${{ steps.meta.outputs.labels }}"
            echo tags: "${{ steps.meta.outputs.tags }}"
            echo version: "${{ steps.meta.outputs.version }}"
          } >> "$GITHUB_STEP_SUMMARY"

      - name: Docker Hub Description
        if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
        uses: peter-evans/dockerhub-description@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
          repository: ${{ vars.DOCKERHUB_REPOSITORY }}
          short-description: ${{ github.event.repository.description }}
1 .github/workflows/test-invoke-pip.yml vendored
@@ -8,6 +8,7 @@ on:
      - 'ready_for_review'
      - 'opened'
      - 'synchronize'
  merge_group:
  workflow_dispatch:

concurrency:
docker/Dockerfile

@@ -1,57 +1,63 @@
# syntax=docker/dockerfile:1

ARG PYTHON_VERSION=3.9
##################
## base image ##
##################
FROM python:${PYTHON_VERSION}-slim AS python-base

LABEL org.opencontainers.image.authors="mauwii@outlook.de"

# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache

# Install necessary packages
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install \
        -yqq \
    && apt-get install -y \
        --no-install-recommends \
        libgl1-mesa-glx=20.3.* \
        libglib2.0-0=2.66.* \
        libopencv-dev=4.5.* \
    && rm -rf /var/lib/apt/lists/*
        libopencv-dev=4.5.*

# set working directory and path
# set working directory and env
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE 1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# don't fall back to legacy build system
ENV PIP_USE_PEP517=1

#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
ENV PIP_USE_PEP517=1

# prepare for buildkit cache
# Install dependencies
RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update \
    && apt-get install -y \
        --no-install-recommends \
        build-essential=12.9 \
        gcc=4:10.2.* \
        python3-dev=3.9.*

# prepare pip for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}

# Install dependencies
RUN \
    --mount=type=cache,target=${PIP_CACHE_DIR} \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get update \
    && apt-get install \
        -yqq \
        --no-install-recommends \
        build-essential=12.9 \
        gcc=4:10.2.* \
        python3-dev=3.9.* \
    && rm -rf /var/lib/apt/lists/*

# create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
    python3 -m venv "${APPNAME}" \
        --upgrade-deps

@@ -61,9 +67,8 @@ COPY --link . .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
ARG PIP_PACKAGE=.
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
    "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
    "${APPNAME}/bin/pip" install .

# build patchmatch
RUN python3 -c "from patchmatch import patch_match"

@@ -73,14 +78,26 @@ RUN python3 -c "from patchmatch import patch_match"
#####################
FROM python-base AS runtime

# setup environment
COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
# Create a new User
ARG UNAME=appuser
RUN useradd \
    --no-log-init \
    -m \
    -U \
    "${UNAME}"

# set Entrypoint and default CMD
# create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
    && chown -R "${UNAME}" "${VOLUME_DIR}"

# setup runtime environment
USER ${UNAME}
COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]

LABEL org.opencontainers.image.authors="mauwii@outlook.de"
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]
docker/build.sh

@@ -1,19 +1,24 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
# as found on https://pytorch.org/get-started/locally/
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
# Possible Values are:
# - cpu
# - cuda
# - rocm
# Don't forget to also set it when executing run.sh
# if it is not set, the script will try to detect the flavor by itself.
#
# Doc can be found here:
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

source ./env.sh

DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}

# print the settings
echo -e "You are using these values:\n"

@@ -21,9 +26,10 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"

# Create docker volume

@@ -36,8 +42,9 @@ fi
# Build Container
DOCKER_BUILDKIT=1 docker build \
    --platform="${PLATFORM}" \
    --tag="${CONTAINER_IMAGE}" \
    --platform="${PLATFORM:-linux/amd64}" \
    --tag="${CONTAINER_IMAGE:-invokeai}" \
    ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
    ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
    ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
    --file="${DOCKERFILE}" \
docker/env.sh

@@ -1,19 +1,31 @@
#!/usr/bin/env bash

# This file is used to set environment variables for the build.sh and run.sh scripts.

# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then

    # Activate virtual environment if not already activated and exists
    if [[ -z $VIRTUAL_ENV ]]; then
        [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
            && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
            && echo "Activated virtual environment: $VIRTUAL_ENV"
    fi

    # Decide which container flavor to build if not specified
    if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
        # Check for CUDA and ROCm
        CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
        ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
        if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
        if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR="cuda"
        elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
        elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR="rocm"
        else
            CONTAINER_FLAVOR="cpu"
        fi
    fi

    # Set PIP_EXTRA_INDEX_URL based on container flavor
    if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
        PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
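env.sh probes the local torch build (via `python -c`) to pick a container flavor. Below is roughly the same probe written directly in Python; `detect_flavor` is a hypothetical helper for illustration, mirroring the script's CUDA-before-ROCm precedence:

```python
# Sketch of the flavor probe env.sh runs via `python -c`; hypothetical
# helper, not part of the repo. Mirrors the script's CUDA-first precedence.
import torch

def detect_flavor() -> str:
    cuda_available = torch.cuda.is_available()
    rocm_available = torch.version.hip is not None  # a string on ROCm builds
    if cuda_available:
        return "cuda"
    if rocm_available:
        return "rocm"
    return "cpu"

print(detect_flavor())  # e.g. "cpu" on a torch wheel without GPU support
```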
@@ -26,9 +38,10 @@ fi

# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
docker/run.sh

@@ -1,14 +1,16 @@
#!/usr/bin/env bash
set -e

# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

source ./env.sh

# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs

echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"

@@ -22,10 +24,18 @@ docker run \
    --name="${REPOSITORY_NAME,,}" \
    --hostname="${REPOSITORY_NAME,,}" \
    --mount=source="${VOLUMENAME}",target=/data \
    ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
    --mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
    ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
    ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
    --publish=9090:9090 \
    --cap-add=sys_nice \
    ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
    "${CONTAINER_IMAGE}" ${1:+$@}
    "${CONTAINER_IMAGE}" ${@:+$@}

# Remove Trash folder
for f in outputs/.Trash*; do
    if [ -e "$f" ]; then
        rm -Rf "$f"
        break
    fi
done
@@ -125,7 +125,7 @@ manager, please follow these steps:
=== "Windows"

    ```ps
    .venv\script\activate
    .venv\Scripts\activate
    ```

If you get a permissions error at this point, run this command and try again

@@ -295,13 +295,12 @@ on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git)

1. From the command line, run this command:

    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```

    This will create a directory named `InvokeAI` and populate it with the
    full source code from the InvokeAI repository.
    This will create a directory named `InvokeAI` and populate it with the
    full source code from the InvokeAI repository.

2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)

@@ -342,7 +341,7 @@ installation protocol (important!)
repository. You can then use GitHub functions to create and submit
pull requests to contribute improvements to the project.

Please see [Contributing](/index.md#Contributing) for hints
Please see [Contributing](../index.md#contributing) for hints
on getting started.

### Unsupported Conda Install
@@ -223,7 +223,7 @@ class Generate:
        self.model_name = model or fallback

        # for VRAM usage statistics
        self.session_peakmem = torch.cuda.max_memory_allocated() if self._has_cuda else None
        self.session_peakmem = torch.cuda.max_memory_allocated(self.device) if self._has_cuda else None
        transformers.logging.set_verbosity_error()

        # gets rid of annoying messages about random seed

@@ -321,6 +321,7 @@ class Generate:
            codeformer_fidelity = None,
            save_original = False,
            upscale = None,
            upscale_denoise_str = 0.75,
            # this is specific to inpainting and causes more extreme inpainting
            inpaint_replace = 0.0,
            # This controls the size at which inpaint occurs (scaled up for inpaint, then back down for the result)

@@ -560,6 +561,7 @@ class Generate:
        if upscale is not None or facetool_strength > 0:
            self.upscale_and_reconstruct(results,
                                         upscale = upscale,
                                         upscale_denoise_str = upscale_denoise_str,
                                         facetool = facetool,
                                         strength = facetool_strength,
                                         codeformer_fidelity = codeformer_fidelity,

@@ -590,20 +592,24 @@ class Generate:
            self.print_cuda_stats()
        return results

    def clear_cuda_cache(self):
    def gather_cuda_stats(self):
        if self._has_cuda():
            self.max_memory_allocated = max(
                self.max_memory_allocated,
                torch.cuda.max_memory_allocated()
                torch.cuda.max_memory_allocated(self.device)
            )
            self.memory_allocated = max(
                self.memory_allocated,
                torch.cuda.memory_allocated()
                torch.cuda.memory_allocated(self.device)
            )
            self.session_peakmem = max(
                self.session_peakmem,
                torch.cuda.max_memory_allocated()
                torch.cuda.max_memory_allocated(self.device)
            )

    def clear_cuda_cache(self):
        if self._has_cuda():
            self.gather_cuda_stats()
            torch.cuda.empty_cache()

    def clear_cuda_stats(self):
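The calls above now pass an explicit device to the CUDA memory queries, so the statistics track the device the model actually runs on rather than the default `cuda:0`. A standalone sketch of the same device-scoped accounting (illustrative only, assuming a CUDA-enabled torch build with at least one GPU):

```python
# Sketch: device-scoped VRAM accounting, as in gather_cuda_stats() above.
# Illustrative only; assumes a CUDA build of torch and at least one GPU.
import torch

if torch.cuda.is_available():
    device = torch.device("cuda:0")
    torch.cuda.reset_peak_memory_stats(device)

    x = torch.empty(1024, 1024, device=device)  # allocate ~4 MB

    current = torch.cuda.memory_allocated(device)   # bytes allocated now
    peak = torch.cuda.max_memory_allocated(device)  # peak since reset
    print(f">> current: {current / 1e9:4.2f}G, peak: {peak / 1e9:4.2f}G")
```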
@@ -612,6 +618,7 @@ class Generate:
    def print_cuda_stats(self):
        if self._has_cuda():
            self.gather_cuda_stats()
            print(
                '>> Max VRAM used for this generation:',
                '%4.2fG.' % (self.max_memory_allocated / 1e9),

@@ -633,6 +640,7 @@ class Generate:
            facetool_strength = 0.0,
            codeformer_fidelity = 0.75,
            upscale = None,
            upscale_denoise_str = 0.75,
            out_direction = None,
            outcrop = [],
            save_original = True, # to get new name

@@ -684,6 +692,7 @@ class Generate:
                codeformer_fidelity = codeformer_fidelity,
                save_original = save_original,
                upscale = upscale,
                upscale_denoise_str = upscale_denoise_str,
                image_callback = callback,
                prefix = prefix,
            )

@@ -952,6 +961,7 @@ class Generate:
            image_list,
            facetool = 'gfpgan',
            upscale = None,
            upscale_denoise_str = 0.75,
            strength = 0.0,
            codeformer_fidelity = 0.75,
            save_original = False,

@@ -982,7 +992,7 @@ class Generate:
                if len(upscale) < 2:
                    upscale.append(0.75)
                image = self.esrgan.process(
                    image, upscale[1], seed, int(upscale[0]))
                    image, upscale[1], seed, int(upscale[0]), denoise_str=upscale_denoise_str)
                else:
                    print(">> ESRGAN is disabled. Image not upscaled.")
            except Exception as e:
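These hunks thread `upscale_denoise_str` from the CLI option `--esrgan_denoise_str` down to `esrgan.process()`. The `upscale` argument itself is a list: scale factor first, then reconstruction strength, with 0.75 appended when the strength is omitted (per the `upscale.append(0.75)` fallback above). A small sketch of that convention; `normalize_upscale` is a hypothetical helper, not part of the codebase:

```python
# Sketch of the `upscale` argument convention used above (illustrative).
# upscale[0]: integer scale factor; upscale[1]: strength, default 0.75.
def normalize_upscale(upscale):
    upscale = list(upscale)
    if len(upscale) < 2:
        upscale.append(0.75)  # same fallback as in upscale_and_reconstruct
    return int(upscale[0]), upscale[1]

print(normalize_upscale([2]))       # (2, 0.75)
print(normalize_upscale([4, 0.9]))  # (4, 0.9)
```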
@@ -58,12 +58,9 @@ def main():
    print(f'>> Internet connectivity is {Globals.internet_available}')

    if not args.conf:
        if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
            report_model_error(opt, e)
            # print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
            # print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
            # print('** This script will now exit.')
            # sys.exit(-1)
        config_file = os.path.join(Globals.root,'configs','models.yaml')
        if not os.path.exists(config_file):
            report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))

    print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
    print(f'>> InvokeAI runtime directory is "{Globals.root}"')
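The rewritten check above computes the config path once and hands a real `FileNotFoundError` to the error reporter (the old branch referenced an `e` that was never defined in that scope). The fail-early pattern in isolation; the paths and the reporting function below are stand-ins, not the repo's own:

```python
# Sketch of the fail-early config check above; the path and reporting
# function are stand-ins for Globals.root and report_model_error().
import os

def report_model_error(opt, err):  # stand-in for the CLI's error reporter
    print(f"** model error: {err}")

def check_models_config(root, opt=None):
    config_file = os.path.join(root, "configs", "models.yaml")
    if not os.path.exists(config_file):
        # hand the exception object to the reporter instead of raising
        report_model_error(
            opt, FileNotFoundError(f"The file {config_file} could not be found.")
        )
    return config_file

check_models_config("/tmp/does-not-exist")
```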
@@ -658,7 +655,9 @@ def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Opt
        model_description=default_description
    )
    config_file = None
    default = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
    default = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') \
        if re.search('inpaint',default_name, flags=re.IGNORECASE) \
        else Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')

    completer.complete_extensions(('.yaml','.yml'))
    completer.set_line(str(default))

@@ -709,12 +708,21 @@ def _get_model_name_and_desc(model_manager,completer,model_name:str='',model_des
    model_description = input(f'Description for this model [{model_description}]: ').strip() or model_description
    return model_name, model_description

def optimize_model(model_name_or_path:str, gen, opt, completer):
def _is_inpainting(model_name_or_path: str)->bool:
    if re.search('inpaint',model_name_or_path, flags=re.IGNORECASE):
        return not input('Is this an inpainting model? [y] ').startswith(('n','N'))
    else:
        return input('Is this an inpainting model? [n] ').startswith(('y','Y'))

def optimize_model(model_name_or_path: str, gen, opt, completer):
    manager = gen.model_manager
    ckpt_path = None
    original_config_file = None

    if (model_info := manager.model_info(model_name_or_path)):
    if model_name_or_path == gen.model_name:
        print("** Can't convert the active model. !switch to another model first. **")
        return
    elif (model_info := manager.model_info(model_name_or_path)):
        if 'weights' in model_info:
            ckpt_path = Path(model_info['weights'])
            original_config_file = Path(model_info['config'])
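`_is_inpainting` flips the prompt's default answer based on the model name: names containing 'inpaint' default to yes, everything else defaults to no, and only an explicit contrary keystroke overrides the default. A sketch of the same default-answer idiom, with the user's input passed in as a parameter so it can be exercised without a terminal (hypothetical helper, for illustration):

```python
# Sketch of the default-answer prompt idiom used by _is_inpainting().
# Hypothetical helper; `answer` simulates user input for illustration.
import re

def is_inpainting(model_name: str, answer: str) -> bool:
    if re.search("inpaint", model_name, flags=re.IGNORECASE):
        # default yes: anything but an explicit 'n'/'N' counts as yes
        return not answer.startswith(("n", "N"))
    # default no: only an explicit 'y'/'Y' counts as yes
    return answer.startswith(("y", "Y"))

print(is_inpainting("sd-v1-5-inpainting", ""))  # True  (empty -> default yes)
print(is_inpainting("sd-v1-5", ""))             # False (empty -> default no)
```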
@@ -731,7 +739,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
        ckpt_path.stem,
        f'Converted model {ckpt_path.stem}'
    )
    is_inpainting = input('Is this an inpainting model? [n] ').startswith(('y','Y'))
    is_inpainting = _is_inpainting(model_name_or_path)
    original_config_file = Path(
        'configs',
        'stable-diffusion',

@@ -889,6 +897,7 @@ def do_postprocess (gen, opt, callback):
        codeformer_fidelity = opt.codeformer_fidelity,
        save_original = opt.save_original,
        upscale = opt.upscale,
        upscale_denoise_str = opt.esrgan_denoise_str,
        out_direction = opt.out_direction,
        outcrop = opt.outcrop,
        callback = callback,
@@ -950,7 +959,7 @@ def prepare_image_metadata(
        print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
        filename = f'{prefix}.{seed}.png'
    except IndexError:
        print(f'** The filename format is broken or incomplete. Will use \'{{prefix}}.{{seed}}.png\' instead')
        print("** The filename format is broken or incomplete. Will use '{prefix}.{seed}.png' instead")
        filename = f'{prefix}.{seed}.png'

    if opt.variation_amount > 0:
@@ -301,10 +301,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
            textual_inversion_manager=self.textual_inversion_manager
        )

        self._enable_memory_efficient_attention()


    def _enable_memory_efficient_attention(self):
    def _adjust_memory_efficient_attention(self, latents: torch.Tensor):
        """
        if xformers is available, use it, otherwise use sliced attention.
        """

@@ -317,7 +315,24 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
            # fix is in https://github.com/kulinseth/pytorch/pull/222 but no idea when it will get merged to pytorch mainline.
            pass
        else:
            self.enable_attention_slicing(slice_size='max')
            if self.device.type == 'cpu' or self.device.type == 'mps':
                mem_free = psutil.virtual_memory().free
            elif self.device.type == 'cuda':
                mem_free, _ = torch.cuda.mem_get_info(self.device)
            else:
                raise ValueError(f"unrecognized device {self.device}")
            # input tensor of [1, 4, h/8, w/8]
            # output tensor of [16, (h/8 * w/8), (h/8 * w/8)]
            bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4
            max_size_required_for_baddbmm = \
                16 * \
                latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \
                bytes_per_element_needed_for_baddbmm_duplication
            if max_size_required_for_baddbmm > (mem_free * 3.3 / 4.0): # 3.3 / 4.0 is from old Invoke code
                self.enable_attention_slicing(slice_size='max')
            else:
                self.disable_attention_slicing()


    def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
                              conditioning_data: ConditioningData,
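The heuristic above estimates the scratch memory that attention's `baddbmm` needs from the latent dimensions, and enables sliced attention only when the estimate exceeds roughly 3.3/4 of free memory. The arithmetic in isolation; tensor shapes follow the comments in the diff, and the free-memory figure is a stand-in for the psutil/CUDA queries above:

```python
# Sketch: estimate of baddbmm scratch memory from latent size, as above.
# Latents are [1, 4, h/8, w/8]; attention works on [16, hw/64, hw/64].
import torch

latents = torch.zeros(1, 4, 64, 64)  # e.g. a 512x512 image -> 64x64 latents

bytes_per_element = latents.element_size() + 4  # heuristic from the diff
hw = latents.size(2) * latents.size(3)
max_size_required_for_baddbmm = 16 * hw * hw * bytes_per_element

mem_free = 8 * 1024**3  # stand-in: pretend 8 GiB free
use_slicing = max_size_required_for_baddbmm > mem_free * 3.3 / 4.0
print(max_size_required_for_baddbmm, use_slicing)
```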
@@ -377,6 +392,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                       noise: torch.Tensor,
                       run_id: str = None,
                       additional_guidance: List[Callable] = None):
        self._adjust_memory_efficient_attention(latents)
        if run_id is None:
            run_id = secrets.token_urlsafe(self.ID_LENGTH)
        if additional_guidance is None: