Merge branch 'main' into pr/2561
commit fb35fe1a41

.dockerignore
@@ -3,21 +3,23 @@
 !invokeai
 !ldm
 !pyproject.toml
-!README.md
 
 # Guard against pulling in any models that might exist in the directory tree
 **/*.pt*
 **/*.ckpt
 
 # ignore frontend but whitelist dist
-invokeai/frontend/**
-!invokeai/frontend/dist
+invokeai/frontend/
+!invokeai/frontend/dist/
 
 # ignore invokeai/assets but whitelist invokeai/assets/web
-invokeai/assets
-!invokeai/assets/web
+invokeai/assets/
+!invokeai/assets/web/
 
-# ignore python cache
-**/__pycache__
+# Byte-compiled / optimized / DLL files
+**/__pycache__/
 **/*.py[cod]
-**/*.egg-info
+
+# Distribution / packaging
+*.egg-info/
+*.egg

.github/workflows/build-container.yml
@@ -3,7 +3,8 @@ on:
   push:
     branches:
       - 'main'
-      - 'update/ci/*'
+      - 'update/ci/docker/*'
+      - 'update/docker/*'
     tags:
       - 'v*.*.*'
 
@@ -20,18 +21,15 @@ jobs:
         include:
           - flavor: amd
             pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-            dockerfile: docker/Dockerfile
-            platforms: linux/amd64,linux/arm64
           - flavor: cuda
             pip-extra-index-url: ''
-            dockerfile: docker/Dockerfile
-            platforms: linux/amd64,linux/arm64
           - flavor: cpu
             pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
-            dockerfile: docker/Dockerfile
-            platforms: linux/amd64,linux/arm64
     runs-on: ubuntu-latest
     name: ${{ matrix.flavor }}
+    env:
+      PLATFORMS: 'linux/amd64,linux/arm64'
+      DOCKERFILE: 'docker/Dockerfile'
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -41,7 +39,9 @@ jobs:
         uses: docker/metadata-action@v4
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          images: ghcr.io/${{ github.repository }}
+          images: |
+            ghcr.io/${{ github.repository }}
+            ${{ vars.DOCKERHUB_REPOSITORY }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
@@ -52,13 +52,14 @@ jobs:
           flavor: |
             latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
             suffix=-${{ matrix.flavor }},onlatest=false
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
         with:
-          platforms: ${{ matrix.platforms }}
+          platforms: ${{ env.PLATFORMS }}
 
       - name: Login to GitHub Container Registry
         if: github.event_name != 'pull_request'
@@ -68,25 +69,34 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
+      - name: Login to Docker Hub
+        if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
       - name: Build container
+        id: docker_build
         uses: docker/build-push-action@v4
         with:
           context: .
-          file: ${{ matrix.dockerfile }}
-          platforms: ${{ matrix.platforms }}
-          push: ${{ github.event_name != 'pull_request' }}
+          file: ${{ env.DOCKERFILE }}
+          platforms: ${{ env.PLATFORMS }}
+          push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: |
+            type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
+            type=gha,scope=main-${{ matrix.flavor }}
+          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
 
-      - name: Output image, digest and metadata to summary
-        run: |
-          {
-            echo imageid: "${{ steps.docker_build.outputs.imageid }}"
-            echo digest: "${{ steps.docker_build.outputs.digest }}"
-            echo labels: "${{ steps.meta.outputs.labels }}"
-            echo tags: "${{ steps.meta.outputs.tags }}"
-            echo version: "${{ steps.meta.outputs.version }}"
-          } >> "$GITHUB_STEP_SUMMARY"
+      - name: Docker Hub Description
+        if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
+        uses: peter-evans/dockerhub-description@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          repository: ${{ vars.DOCKERHUB_REPOSITORY }}
+          short-description: ${{ github.event.repository.description }}

.github/workflows/test-invoke-pip.yml
@@ -8,6 +8,7 @@ on:
       - 'ready_for_review'
       - 'opened'
       - 'synchronize'
+  merge_group:
   workflow_dispatch:
 
 concurrency:

docker/Dockerfile
@@ -1,57 +1,63 @@
 # syntax=docker/dockerfile:1
 
 ARG PYTHON_VERSION=3.9
 ##################
 ## base image ##
 ##################
 FROM python:${PYTHON_VERSION}-slim AS python-base
 
+LABEL org.opencontainers.image.authors="mauwii@outlook.de"
+
 # prepare for buildkit cache
-RUN rm -f /etc/apt/apt.conf.d/docker-clean
+RUN rm -f /etc/apt/apt.conf.d/docker-clean \
+    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
 
 # Install necesarry packages
 RUN \
     --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
     apt-get update \
-    && apt-get install \
-    -yqq \
+    && apt-get install -y \
    --no-install-recommends \
     libgl1-mesa-glx=20.3.* \
     libglib2.0-0=2.66.* \
-    libopencv-dev=4.5.* \
-    && rm -rf /var/lib/apt/lists/*
+    libopencv-dev=4.5.*
 
-# set working directory and path
+# set working directory and env
 ARG APPDIR=/usr/src
 ARG APPNAME=InvokeAI
 WORKDIR ${APPDIR}
-ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
+ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
+# Keeps Python from generating .pyc files in the container
+ENV PYTHONDONTWRITEBYTECODE 1
+# Turns off buffering for easier container logging
+ENV PYTHONUNBUFFERED 1
+# don't fall back to legacy build system
+ENV PIP_USE_PEP517=1
 
 #######################
 ## build pyproject ##
 #######################
 FROM python-base AS pyproject-builder
-ENV PIP_USE_PEP517=1
 
-# prepare for buildkit cache
+# Install dependencies
+RUN \
+    --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt-get update \
+    && apt-get install -y \
+    --no-install-recommends \
+    build-essential=12.9 \
+    gcc=4:10.2.* \
+    python3-dev=3.9.*
+
+# prepare pip for buildkit cache
 ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
 ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
 RUN mkdir -p ${PIP_CACHE_DIR}
 
-# Install dependencies
-RUN \
-    --mount=type=cache,target=${PIP_CACHE_DIR} \
-    --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt-get update \
-    && apt-get install \
-    -yqq \
-    --no-install-recommends \
-    build-essential=12.9 \
-    gcc=4:10.2.* \
-    python3-dev=3.9.* \
-    && rm -rf /var/lib/apt/lists/*
 
 # create virtual environment
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
+RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
     python3 -m venv "${APPNAME}" \
         --upgrade-deps
@@ -61,9 +67,8 @@ COPY --link . .
 # install pyproject.toml
 ARG PIP_EXTRA_INDEX_URL
 ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
-ARG PIP_PACKAGE=.
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
-    "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
+RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
+    "${APPNAME}/bin/pip" install .
 
 # build patchmatch
 RUN python3 -c "from patchmatch import patch_match"
@@ -73,14 +78,26 @@ RUN python3 -c "from patchmatch import patch_match"
 #####################
 FROM python-base AS runtime
 
-# setup environment
-COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
-ENV INVOKEAI_ROOT=/data
-ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
-
-# set Entrypoint and default CMD
+# Create a new User
+ARG UNAME=appuser
+RUN useradd \
+    --no-log-init \
+    -m \
+    -U \
+    "${UNAME}"
+
+# create volume directory
+ARG VOLUME_DIR=/data
+RUN mkdir -p "${VOLUME_DIR}" \
+    && chown -R "${UNAME}" "${VOLUME_DIR}"
+
+# setup runtime environment
+USER ${UNAME}
+COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
+ENV INVOKEAI_ROOT ${VOLUME_DIR}
+ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
+ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
+EXPOSE 9090
 ENTRYPOINT [ "invokeai" ]
-CMD [ "--web", "--host=0.0.0.0" ]
-VOLUME [ "/data" ]
+CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
+VOLUME [ "${VOLUME_DIR}" ]
 
-LABEL org.opencontainers.image.authors="mauwii@outlook.de"

docker/build.sh
@@ -1,19 +1,24 @@
 #!/usr/bin/env bash
 set -e
 
-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
-# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
-# CUDA 11.6: https://download.pytorch.org/whl/cu116
-# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
-# CPU: https://download.pytorch.org/whl/cpu
-# as found on https://pytorch.org/get-started/locally/
+# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
+# e.g. CONTAINER_FLAVOR=cpu ./build.sh
+# Possible Values are:
+# - cpu
+# - cuda
+# - rocm
+# Don't forget to also set it when executing run.sh
+# if it is not set, the script will try to detect the flavor by itself.
+#
+# Doc can be found here:
+# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
 
-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1
 
 source ./env.sh
 
-DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
+DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
 
 # print the settings
 echo -e "You are using these values:\n"
@@ -21,9 +26,10 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
 echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
 echo -e "Volumename:\t\t${VOLUMENAME}"
 echo -e "Platform:\t\t${PLATFORM}"
-echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
-echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
+echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
+echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
 echo -e "Container Tag:\t\t${CONTAINER_TAG}"
+echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
 echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
 
 # Create docker volume
@@ -36,8 +42,9 @@ fi
 
 # Build Container
 DOCKER_BUILDKIT=1 docker build \
-    --platform="${PLATFORM}" \
-    --tag="${CONTAINER_IMAGE}" \
+    --platform="${PLATFORM:-linux/amd64}" \
+    --tag="${CONTAINER_IMAGE:-invokeai}" \
+    ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
     ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
     ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
     --file="${DOCKERFILE}" \

docker/env.sh
@@ -1,19 +1,31 @@
 #!/usr/bin/env bash
 
+# This file is used to set environment variables for the build.sh and run.sh scripts.
+
+# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
 if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
 
+    # Activate virtual environment if not already activated and exists
+    if [[ -z $VIRTUAL_ENV ]]; then
+        [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
+            && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
+            && echo "Activated virtual environment: $VIRTUAL_ENV"
+    fi
+
     # Decide which container flavor to build if not specified
     if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
         # Check for CUDA and ROCm
         CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
         ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
-        if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
+        if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
             CONTAINER_FLAVOR="cuda"
-        elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
+        elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
             CONTAINER_FLAVOR="rocm"
         else
             CONTAINER_FLAVOR="cpu"
         fi
     fi
 
     # Set PIP_EXTRA_INDEX_URL based on container flavor
     if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
         PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
|
|||||||
|
|
||||||
# Variables shared by build.sh and run.sh
|
# Variables shared by build.sh and run.sh
|
||||||
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
|
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
|
||||||
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
|
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
|
||||||
|
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
|
||||||
ARCH="${ARCH-$(uname -m)}"
|
ARCH="${ARCH-$(uname -m)}"
|
||||||
PLATFORM="${PLATFORM-Linux/${ARCH}}"
|
PLATFORM="${PLATFORM-linux/${ARCH}}"
|
||||||
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
|
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
|
||||||
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
|
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
|
||||||
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
|
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
|
||||||
|
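
For reference, this is the detection env.sh runs through its `python -c` one-liners. A minimal Python sketch of the same logic (illustrative only, assuming a local torch install; detect_flavor is a hypothetical name, not repo code):

    # Hypothetical mirror of env.sh's flavor detection; not part of the repo.
    import torch

    def detect_flavor() -> str:
        if torch.cuda.is_available():          # CUDA_AVAILABLE in env.sh
            return "cuda"
        if torch.version.hip is not None:      # ROCM_AVAILABLE: hip is set only on ROCm wheels
            return "rocm"
        return "cpu"

    print(detect_flavor())

The check order mirrors the script: CUDA wins when both checks report available, and anything else falls back to the cpu flavor.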

docker/run.sh
@@ -1,14 +1,16 @@
 #!/usr/bin/env bash
 set -e
 
-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
-# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
 
-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1
 
 source ./env.sh
 
+# Create outputs directory if it does not exist
+[[ -d ./outputs ]] || mkdir ./outputs
+
 echo -e "You are using these values:\n"
 echo -e "Volumename:\t${VOLUMENAME}"
 echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
@@ -22,10 +24,18 @@ docker run \
     --name="${REPOSITORY_NAME,,}" \
     --hostname="${REPOSITORY_NAME,,}" \
     --mount=source="${VOLUMENAME}",target=/data \
-    ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
+    --mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
     ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
     ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
     --publish=9090:9090 \
     --cap-add=sys_nice \
     ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
-    "${CONTAINER_IMAGE}" ${1:+$@}
+    "${CONTAINER_IMAGE}" ${@:+$@}
+
+# Remove Trash folder
+for f in outputs/.Trash*; do
+    if [ -e "$f" ]; then
+        rm -Rf "$f"
+        break
+    fi
+done

@@ -125,7 +125,7 @@ manager, please follow these steps:
 === "Windows"
 
     ```ps
-    .venv\script\activate
+    .venv\Scripts\activate
     ```
 
 If you get a permissions error at this point, run this command and try again
@@ -295,13 +295,12 @@ on your system, please see the [Git Installation
 Guide](https://github.com/git-guides/install-git)
 
 1. From the command line, run this command:
-
    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```
 
    This will create a directory named `InvokeAI` and populate it with the
    full source code from the InvokeAI repository.
 
 2. Activate the InvokeAI virtual environment as per step (4) of the manual
 installation protocol (important!)
@@ -342,7 +341,7 @@ installation protocol (important!)
 repository. You can then use GitHub functions to create and submit
 pull requests to contribute improvements to the project.
 
-Please see [Contributing](/index.md#Contributing) for hints
+Please see [Contributing](../index.md#contributing) for hints
 on getting started.
 
 ### Unsupported Conda Install

ldm/generate.py
@@ -223,7 +223,7 @@ class Generate:
         self.model_name = model or fallback
 
         # for VRAM usage statistics
-        self.session_peakmem = torch.cuda.max_memory_allocated() if self._has_cuda else None
+        self.session_peakmem = torch.cuda.max_memory_allocated(self.device) if self._has_cuda else None
         transformers.logging.set_verbosity_error()
 
         # gets rid of annoying messages about random seed
@@ -321,6 +321,7 @@ class Generate:
             codeformer_fidelity = None,
             save_original = False,
             upscale = None,
+            upscale_denoise_str = 0.75,
             # this is specific to inpainting and causes more extreme inpainting
             inpaint_replace = 0.0,
             # This controls the size at which inpaint occurs (scaled up for inpaint, then back down for the result)
@@ -560,6 +561,7 @@ class Generate:
         if upscale is not None or facetool_strength > 0:
             self.upscale_and_reconstruct(results,
                 upscale = upscale,
+                upscale_denoise_str = upscale_denoise_str,
                 facetool = facetool,
                 strength = facetool_strength,
                 codeformer_fidelity = codeformer_fidelity,
@@ -590,20 +592,24 @@ class Generate:
             self.print_cuda_stats()
         return results
 
-    def clear_cuda_cache(self):
+    def gather_cuda_stats(self):
         if self._has_cuda():
             self.max_memory_allocated = max(
                 self.max_memory_allocated,
-                torch.cuda.max_memory_allocated()
+                torch.cuda.max_memory_allocated(self.device)
             )
             self.memory_allocated = max(
                 self.memory_allocated,
-                torch.cuda.memory_allocated()
+                torch.cuda.memory_allocated(self.device)
             )
             self.session_peakmem = max(
                 self.session_peakmem,
-                torch.cuda.max_memory_allocated()
+                torch.cuda.max_memory_allocated(self.device)
             )
 
+    def clear_cuda_cache(self):
+        if self._has_cuda():
+            self.gather_cuda_stats()
             torch.cuda.empty_cache()
 
     def clear_cuda_stats(self):
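
The change above pins every counter to self.device. A short sketch of why that matters (illustrative only, assumes a CUDA build of PyTorch): without an argument these functions report on the current device, which on a multi-GPU host may not be the device the generator actually runs on.

    # Illustrative only: PyTorch memory counters are per-device.
    import torch

    if torch.cuda.is_available():
        device = torch.device("cuda:0")
        x = torch.empty((1024, 1024), device=device)    # ~4 MB allocated on cuda:0
        print(torch.cuda.memory_allocated(device))      # bytes currently allocated there
        print(torch.cuda.max_memory_allocated(device))  # peak allocation for that device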
@@ -612,6 +618,7 @@ class Generate:
 
     def print_cuda_stats(self):
         if self._has_cuda():
+            self.gather_cuda_stats()
             print(
                 '>> Max VRAM used for this generation:',
                 '%4.2fG.' % (self.max_memory_allocated / 1e9),
@@ -633,6 +640,7 @@ class Generate:
             facetool_strength = 0.0,
             codeformer_fidelity = 0.75,
             upscale = None,
+            upscale_denoise_str = 0.75,
             out_direction = None,
             outcrop = [],
             save_original = True, # to get new name
@@ -684,6 +692,7 @@ class Generate:
             codeformer_fidelity = codeformer_fidelity,
             save_original = save_original,
             upscale = upscale,
+            upscale_denoise_str = upscale_denoise_str,
             image_callback = callback,
             prefix = prefix,
         )
@@ -952,6 +961,7 @@ class Generate:
             image_list,
             facetool = 'gfpgan',
             upscale = None,
+            upscale_denoise_str = 0.75,
             strength = 0.0,
             codeformer_fidelity = 0.75,
             save_original = False,
@@ -982,7 +992,7 @@ class Generate:
                 if len(upscale) < 2:
                     upscale.append(0.75)
                 image = self.esrgan.process(
-                    image, upscale[1], seed, int(upscale[0]))
+                    image, upscale[1], seed, int(upscale[0]), denoise_str=upscale_denoise_str)
             else:
                 print(">> ESRGAN is disabled. Image not upscaled.")
         except Exception as e:
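
A hedged usage sketch of the new parameter as it threads through the hunks above (prompt and values are examples; assumes a configured Generate instance, which normally needs the runtime directory set up):

    # Illustrative call only; argument names come from the diff above.
    from ldm.generate import Generate

    gen = Generate()
    results = gen.prompt2image(
        "a sunlit forest clearing",
        upscale=[2, 0.75],           # 2x ESRGAN upscale at strength 0.75
        upscale_denoise_str=0.9,     # new in this commit; the default is 0.75
    )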

@@ -58,12 +58,9 @@ def main():
     print(f'>> Internet connectivity is {Globals.internet_available}')
 
     if not args.conf:
-        if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
-            report_model_error(opt, e)
-        # print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
-        # print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
-        # print('** This script will now exit.')
-        # sys.exit(-1)
+        config_file = os.path.join(Globals.root,'configs','models.yaml')
+        if not os.path.exists(config_file):
+            report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))
 
     print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
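
Note on the hunk above: the removed branch passed `e` to report_model_error, but no `e` is bound at that point in main(), so the old error path would itself have raised NameError. A minimal sketch of the fixed pattern (paths are examples, not repo values):

    # Illustrative only: build the exception explicitly instead of
    # referencing an unbound name, then hand it to the error reporter.
    import os

    root = "/tmp/invokeai"                      # stand-in for Globals.root
    config_file = os.path.join(root, "configs", "models.yaml")
    if not os.path.exists(config_file):
        err = FileNotFoundError(f"The file {config_file} could not be found.")
        print(err)                              # stand-in for report_model_error(opt, err)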
@@ -658,7 +655,9 @@ def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Opt
         model_description=default_description
     )
     config_file = None
-    default = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
+    default = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') \
+        if re.search('inpaint',default_name, flags=re.IGNORECASE) \
+        else Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
 
     completer.complete_extensions(('.yaml','.yml'))
     completer.set_line(str(default))
@@ -709,12 +708,21 @@ def _get_model_name_and_desc(model_manager,completer,model_name:str='',model_des
     model_description = input(f'Description for this model [{model_description}]: ').strip() or model_description
     return model_name, model_description
 
-def optimize_model(model_name_or_path:str, gen, opt, completer):
+def _is_inpainting(model_name_or_path: str)->bool:
+    if re.search('inpaint',model_name_or_path, flags=re.IGNORECASE):
+        return not input('Is this an inpainting model? [y] ').startswith(('n','N'))
+    else:
+        return not input('Is this an inpainting model? [n] ').startswith(('y','Y'))
+
+def optimize_model(model_name_or_path: str, gen, opt, completer):
     manager = gen.model_manager
     ckpt_path = None
     original_config_file = None
 
-    if (model_info := manager.model_info(model_name_or_path)):
+    if model_name_or_path == gen.model_name:
+        print("** Can't convert the active model. !switch to another model first. **")
+        return
+    elif (model_info := manager.model_info(model_name_or_path)):
         if 'weights' in model_info:
             ckpt_path = Path(model_info['weights'])
             original_config_file = Path(model_info['config'])
@@ -731,7 +739,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
         ckpt_path.stem,
         f'Converted model {ckpt_path.stem}'
     )
-    is_inpainting = input('Is this an inpainting model? [n] ').startswith(('y','Y'))
+    is_inpainting = _is_inpainting(model_name_or_path)
     original_config_file = Path(
         'configs',
         'stable-diffusion',
@@ -889,6 +897,7 @@ def do_postprocess (gen, opt, callback):
         codeformer_fidelity = opt.codeformer_fidelity,
         save_original = opt.save_original,
         upscale = opt.upscale,
+        upscale_denoise_str = opt.esrgan_denoise_str,
         out_direction = opt.out_direction,
         outcrop = opt.outcrop,
         callback = callback,
@@ -950,7 +959,7 @@ def prepare_image_metadata(
             print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
             filename = f'{prefix}.{seed}.png'
         except IndexError:
-            print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
+            print("** The filename format is broken or complete. Will use '{prefix}.{seed}.png' instead")
             filename = f'{prefix}.{seed}.png'
 
         if opt.variation_amount > 0:

@@ -301,10 +301,8 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             textual_inversion_manager=self.textual_inversion_manager
         )
 
-        self._enable_memory_efficient_attention()
-
-    def _enable_memory_efficient_attention(self):
+    def _adjust_memory_efficient_attention(self, latents: Torch.tensor):
         """
         if xformers is available, use it, otherwise use sliced attention.
         """
@@ -317,7 +315,24 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
             # fix is in https://github.com/kulinseth/pytorch/pull/222 but no idea when it will get merged to pytorch mainline.
             pass
         else:
-            self.enable_attention_slicing(slice_size='max')
+            if self.device.type == 'cpu' or self.device.type == 'mps':
+                mem_free = psutil.virtual_memory().free
+            elif self.device.type == 'cuda':
+                mem_free, _ = torch.cuda.mem_get_info(self.device)
+            else:
+                raise ValueError(f"unrecognized device {device}")
+            # input tensor of [1, 4, h/8, w/8]
+            # output tensor of [16, (h/8 * w/8), (h/8 * w/8)]
+            bytes_per_element_needed_for_baddbmm_duplication = latents.element_size() + 4
+            max_size_required_for_baddbmm = \
+                16 * \
+                latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \
+                bytes_per_element_needed_for_baddbmm_duplication
+            if max_size_required_for_baddbmm > (mem_free * 3.3 / 4.0):  # 3.3 / 4.0 is from old Invoke code
+                self.enable_attention_slicing(slice_size='max')
+            else:
+                self.disable_attention_slicing()
 
     def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
                               conditioning_data: ConditioningData,
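
A worked example of the estimate in the hunk above (a sketch, not repo code; assumes a 512x512 generation, where latents arrive as [1, 4, 64, 64] in fp16):

    # Sketch of the baddbmm memory estimate for a 512x512 image.
    import torch

    latents = torch.zeros(1, 4, 64, 64, dtype=torch.float16)
    bytes_per_element = latents.element_size() + 4         # 2 (fp16) + 4 = 6
    h, w = latents.size(2), latents.size(3)                # 64, 64
    max_size = 16 * (h * w) ** 2 * bytes_per_element       # 16 * 4096**2 * 6 ≈ 1.6 GB
    print(f"{max_size / 1e9:.1f} GB needed without attention slicing")

    mem_free = 8e9                                         # pretend 8 GB are free
    # slicing engages only when the estimate exceeds ~82.5% of free memory
    print("sliced" if max_size > mem_free * 3.3 / 4.0 else "unsliced")

With 8 GB free the unsliced path is chosen; at 512x512, slicing only engages when less than roughly 2 GB remains free.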
@@ -377,6 +392,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                          noise: torch.Tensor,
                          run_id: str = None,
                          additional_guidance: List[Callable] = None):
+        self._adjust_memory_efficient_attention(latents)
         if run_id is None:
             run_id = secrets.token_urlsafe(self.ID_LENGTH)
         if additional_guidance is None: