Merge branch 'main' into update/ci/prepare-test-invoke-pip-for-queue

commit 58a1d9aae0
Author: Lincoln Stein
Date: 2023-02-11 22:38:55 -05:00 (committed by GitHub)
34 changed files with 572 additions and 305 deletions


@@ -3,21 +3,23 @@
 !invokeai
 !ldm
 !pyproject.toml
+!README.md

 # Guard against pulling in any models that might exist in the directory tree
 **/*.pt*
 **/*.ckpt

 # ignore frontend but whitelist dist
-invokeai/frontend/**
-!invokeai/frontend/dist
+invokeai/frontend/
+!invokeai/frontend/dist/

 # ignore invokeai/assets but whitelist invokeai/assets/web
-invokeai/assets
-!invokeai/assets/web
+invokeai/assets/
+!invokeai/assets/web/

-# ignore python cache
-**/__pycache__
+# Byte-compiled / optimized / DLL files
+**/__pycache__/
 **/*.py[cod]
-**/*.egg-info
+
+# Distribution / packaging
+*.egg-info/
+*.egg


@@ -3,7 +3,8 @@ on:
   push:
     branches:
       - 'main'
-      - 'update/ci/*'
+      - 'update/ci/docker/*'
+      - 'update/docker/*'
     tags:
       - 'v*.*.*'

@@ -20,18 +21,15 @@ jobs:
        include:
          - flavor: amd
            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-           dockerfile: docker/Dockerfile
-           platforms: linux/amd64,linux/arm64
          - flavor: cuda
            pip-extra-index-url: ''
-           dockerfile: docker/Dockerfile
-           platforms: linux/amd64,linux/arm64
          - flavor: cpu
            pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
-           dockerfile: docker/Dockerfile
-           platforms: linux/amd64,linux/arm64
     runs-on: ubuntu-latest
     name: ${{ matrix.flavor }}
+    env:
+      PLATFORMS: 'linux/amd64,linux/arm64'
+      DOCKERFILE: 'docker/Dockerfile'
     steps:
       - name: Checkout
         uses: actions/checkout@v3

@@ -41,7 +39,9 @@ jobs:
         uses: docker/metadata-action@v4
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          images: ghcr.io/${{ github.repository }}
+          images: |
+            ghcr.io/${{ github.repository }}
+            ${{ vars.DOCKERHUB_REPOSITORY }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag

@@ -52,13 +52,14 @@ jobs:
           flavor: |
             latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
             suffix=-${{ matrix.flavor }},onlatest=false

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
-         platforms: ${{ matrix.platforms }}
+         platforms: ${{ env.PLATFORMS }}

      - name: Login to GitHub Container Registry
        if: github.event_name != 'pull_request'

@@ -68,25 +69,34 @@ jobs:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

+     - name: Login to Docker Hub
+       if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
+       uses: docker/login-action@v2
+       with:
+         username: ${{ secrets.DOCKERHUB_USERNAME }}
+         password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build container
+       id: docker_build
        uses: docker/build-push-action@v4
        with:
          context: .
-         file: ${{ matrix.dockerfile }}
-         platforms: ${{ matrix.platforms }}
-         push: ${{ github.event_name != 'pull_request' }}
+         file: ${{ env.DOCKERFILE }}
+         platforms: ${{ env.PLATFORMS }}
+         push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
-         cache-from: type=gha
-         cache-to: type=gha,mode=max
+         cache-from: |
+           type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
+           type=gha,scope=main-${{ matrix.flavor }}
+         cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}

-     - name: Output image, digest and metadata to summary
-       run: |
-         {
-           echo imageid: "${{ steps.docker_build.outputs.imageid }}"
-           echo digest: "${{ steps.docker_build.outputs.digest }}"
-           echo labels: "${{ steps.meta.outputs.labels }}"
-           echo tags: "${{ steps.meta.outputs.tags }}"
-           echo version: "${{ steps.meta.outputs.version }}"
-         } >> "$GITHUB_STEP_SUMMARY"
+     - name: Docker Hub Description
+       if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
+       uses: peter-evans/dockerhub-description@v3
+       with:
+         username: ${{ secrets.DOCKERHUB_USERNAME }}
+         password: ${{ secrets.DOCKERHUB_TOKEN }}
+         repository: ${{ vars.DOCKERHUB_REPOSITORY }}
+         short-description: ${{ github.event.repository.description }}

.gitignore (3 changes)

@@ -1,4 +1,5 @@
 # ignore default image save location and model symbolic link
+.idea/
 embeddings/
 outputs/
 models/ldm/stable-diffusion-v1/model.ckpt

@@ -232,4 +233,4 @@ installer/update.bat
 installer/update.sh

 # no longer stored in source directory
 models

README.md (146 changes)

@@ -1,6 +1,6 @@
 <div align="center">

-![project logo](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)
+![project logo](https://github.com/invoke-ai/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)

 # InvokeAI: A Stable Diffusion Toolkit

@@ -41,38 +41,136 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
 <div align="center">

-![canvas preview](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/canvas_preview.png)
+![canvas preview](https://github.com/invoke-ai/InvokeAI/raw/main/docs/assets/canvas_preview.png)

 </div>

-# Getting Started with InvokeAI
+## Table of Contents
+
+1. [Quick Start](#getting-started-with-invokeai)
+2. [Installation](#detailed-installation-instructions)
+3. [Hardware Requirements](#hardware-requirements)
+4. [Features](#features)
+5. [Latest Changes](#latest-changes)
+6. [Troubleshooting](#troubleshooting)
+7. [Contributing](#contributing)
+8. [Contributors](#contributors)
+9. [Support](#support)
+10. [Further Reading](#further-reading)
+
+## Getting Started with InvokeAI

 For full installation and upgrade instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

+### Automatic Installer (suggested for 1st time users)
+
 1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
 2. Download the .zip file for your OS (Windows/macOS/Linux).
 3. Unzip the file.
-4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
-5. Wait a while, until it is done.
-6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
-7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
-8. Type `banana sushi` in the box on the top left and click `Invoke`
-
-## Table of Contents
-
-1. [Installation](#installation)
-2. [Hardware Requirements](#hardware-requirements)
-3. [Features](#features)
-4. [Latest Changes](#latest-changes)
-5. [Troubleshooting](#troubleshooting)
-6. [Contributing](#contributing)
-7. [Contributors](#contributors)
-8. [Support](#support)
-9. [Further Reading](#further-reading)
-
-## Installation
+4. If you are on Windows, double-click on the `install.bat` script. On
+   macOS, open a Terminal window, drag the file `install.sh` from Finder
+   into the Terminal, and press return. On Linux, run `install.sh`.
+5. You'll be asked to confirm the location of the folder in which
+   to install InvokeAI and its image generation model files. Pick a
+   location with at least 15 GB of free memory. More if you plan on
+   installing lots of models.
+6. Wait while the installer does its thing. After installing the software,
+   the installer will launch a script that lets you configure InvokeAI and
+   select a set of starting image generation models.
+7. Find the folder that InvokeAI was installed into (it is not the
+   same as the unpacked zip file directory!) The default location of this
+   folder (if you didn't change it in step 5) is `~/invokeai` on
+   Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
+8. On Windows systems, double-click on the `invoke.bat` file. On
+   macOS, open a Terminal window, drag `invoke.sh` from the folder into
+   the Terminal, and press return. On Linux, run `invoke.sh`
+9. Press 2 to open the "browser-based UI", press enter/return, wait a
+   minute or two for Stable Diffusion to start up, then open your browser
+   and go to http://localhost:9090.
+10. Type `banana sushi` in the box on the top left and click `Invoke`
+
+### Command-Line Installation (for users familiar with Terminals)
+
+You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
+not supported.
+
+1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
+2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
+
+   ```terminal
+   mkdir invokeai
+   ```
+
+3. Create a virtual environment named `.venv` inside this directory and activate it:
+
+   ```terminal
+   cd invokeai
+   python -m venv .venv --prompt InvokeAI
+   ```
+
+4. Activate the virtual environment (do it every time you run InvokeAI)
+
+   _For Linux/Mac users:_
+
+   ```sh
+   source .venv/bin/activate
+   ```
+
+   _For Windows users:_
+
+   ```ps
+   .venv\Scripts\activate
+   ```
+
+5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
+
+   _For Windows/Linux with an NVIDIA GPU:_
+
+   ```terminal
+   pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+   ```
+
+   _For Linux with an AMD GPU:_
+
+   ```sh
+   pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
+   ```
+
+   _For Macintoshes, either Intel or M1/M2:_
+
+   ```sh
+   pip install InvokeAI --use-pep517
+   ```
+
+6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
+
+   ```terminal
+   invokeai-configure
+   ```
+
+7. Launch the web server (do it every time you run InvokeAI):
+
+   ```terminal
+   invokeai --web
+   ```
+
+8. Point your browser to http://localhost:9090 to bring up the web interface.
+
+9. Type `banana sushi` in the box on the top left and click `Invoke`.
+
+Be sure to activate the virtual environment each time before re-launching InvokeAI,
+using `source .venv/bin/activate` or `.venv\Scripts\activate`.
+
+### Detailed Installation Instructions

 This fork is supported across Linux, Windows and Macintosh. Linux
 users can use either an Nvidia-based card (with CUDA support) or an

@@ -80,13 +178,13 @@ AMD card (using the ROCm driver). For full installation and upgrade
 instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)

-### Hardware Requirements
+## Hardware Requirements

 InvokeAI is supported across Linux, Windows and macOS. Linux
 users can use either an Nvidia-based card (with CUDA support) or an
 AMD card (using the ROCm driver).

-#### System
+### System

 You will need one of the following:

@@ -98,11 +196,11 @@ We do not recommend the GTX 1650 or 1660 series video cards. They are
 unable to run in half-precision mode and do not have sufficient VRAM
 to render 512x512 images.

-#### Memory
+### Memory

 - At least 12 GB Main Memory RAM.

-#### Disk
+### Disk

 - At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.

@@ -152,7 +250,7 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
 Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
 problems and other issues.

-# Contributing
+## Contributing

 Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
 cleanup, testing, or code reviews, is very much encouraged to do so.


@@ -1,57 +1,63 @@
 # syntax=docker/dockerfile:1
 ARG PYTHON_VERSION=3.9
 ##################
 ## base image ##
 ##################
 FROM python:${PYTHON_VERSION}-slim AS python-base
-LABEL org.opencontainers.image.authors="mauwii@outlook.de"

 # prepare for buildkit cache
-RUN rm -f /etc/apt/apt.conf.d/docker-clean
+RUN rm -f /etc/apt/apt.conf.d/docker-clean \
+    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache

 # Install necessary packages
 RUN \
     --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
     apt-get update \
-    && apt-get install \
-        -yqq \
+    && apt-get install -y \
         --no-install-recommends \
         libgl1-mesa-glx=20.3.* \
         libglib2.0-0=2.66.* \
-        libopencv-dev=4.5.* \
-    && rm -rf /var/lib/apt/lists/*
+        libopencv-dev=4.5.*

-# set working directory and path
+# set working directory and env
 ARG APPDIR=/usr/src
 ARG APPNAME=InvokeAI
 WORKDIR ${APPDIR}
-ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
+ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
+# Keeps Python from generating .pyc files in the container
+ENV PYTHONDONTWRITEBYTECODE 1
+# Turns off buffering for easier container logging
+ENV PYTHONUNBUFFERED 1
+# don't fall back to legacy build system
+ENV PIP_USE_PEP517=1

 #######################
 ## build pyproject ##
 #######################
 FROM python-base AS pyproject-builder
-ENV PIP_USE_PEP517=1

-# prepare for buildkit cache
+# Install dependencies
+RUN \
+    --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt-get update \
+    && apt-get install -y \
+        --no-install-recommends \
+        build-essential=12.9 \
+        gcc=4:10.2.* \
+        python3-dev=3.9.*

+# prepare pip for buildkit cache
 ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
 ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
 RUN mkdir -p ${PIP_CACHE_DIR}

-# Install dependencies
-RUN \
-    --mount=type=cache,target=${PIP_CACHE_DIR} \
-    --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt-get update \
-    && apt-get install \
-        -yqq \
-        --no-install-recommends \
-        build-essential=12.9 \
-        gcc=4:10.2.* \
-        python3-dev=3.9.* \
-    && rm -rf /var/lib/apt/lists/*

 # create virtual environment
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
+RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
     python3 -m venv "${APPNAME}" \
         --upgrade-deps

@@ -61,9 +67,8 @@ COPY --link . .
 # install pyproject.toml
 ARG PIP_EXTRA_INDEX_URL
 ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
-ARG PIP_PACKAGE=.
-RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
-    "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
+RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
+    "${APPNAME}/bin/pip" install .

 # build patchmatch
 RUN python3 -c "from patchmatch import patch_match"

@@ -73,14 +78,26 @@ RUN python3 -c "from patchmatch import patch_match"
 #####################
 FROM python-base AS runtime

-# setup environment
-COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
-ENV INVOKEAI_ROOT=/data
-ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
+# Create a new User
+ARG UNAME=appuser
+RUN useradd \
+    --no-log-init \
+    -m \
+    -U \
+    "${UNAME}"

-# set Entrypoint and default CMD
+# create volume directory
+ARG VOLUME_DIR=/data
+RUN mkdir -p "${VOLUME_DIR}" \
+    && chown -R "${UNAME}" "${VOLUME_DIR}"

+# setup runtime environment
+USER ${UNAME}
+COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
+ENV INVOKEAI_ROOT ${VOLUME_DIR}
+ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
+ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
+EXPOSE 9090
 ENTRYPOINT [ "invokeai" ]
-CMD [ "--web", "--host=0.0.0.0" ]
-VOLUME [ "/data" ]
+CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
+VOLUME [ "${VOLUME_DIR}" ]
+LABEL org.opencontainers.image.authors="mauwii@outlook.de"


@@ -1,19 +1,24 @@
 #!/usr/bin/env bash
 set -e

-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
-# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
-# CUDA 11.6: https://download.pytorch.org/whl/cu116
-# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
-# CPU: https://download.pytorch.org/whl/cpu
-# as found on https://pytorch.org/get-started/locally/
+# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
+# e.g. CONTAINER_FLAVOR=cpu ./build.sh
+# Possible Values are:
+# - cpu
+# - cuda
+# - rocm
+# Don't forget to also set it when executing run.sh
+# if it is not set, the script will try to detect the flavor by itself.
+#
+# Doc can be found here:
+# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

 source ./env.sh

-DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
+DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}

 # print the settings
 echo -e "You are using these values:\n"

@@ -21,9 +26,10 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
 echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
 echo -e "Volumename:\t\t${VOLUMENAME}"
 echo -e "Platform:\t\t${PLATFORM}"
-echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
-echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
+echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
+echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
 echo -e "Container Tag:\t\t${CONTAINER_TAG}"
+echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
 echo -e "Container Image:\t${CONTAINER_IMAGE}\n"

@@ -36,8 +42,9 @@ fi
 # Build Container
 DOCKER_BUILDKIT=1 docker build \
-    --platform="${PLATFORM}" \
-    --tag="${CONTAINER_IMAGE}" \
+    --platform="${PLATFORM:-linux/amd64}" \
+    --tag="${CONTAINER_IMAGE:-invokeai}" \
+    ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
     ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
     ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
     --file="${DOCKERFILE}" \


@@ -1,19 +1,31 @@
 #!/usr/bin/env bash

+# This file is used to set environment variables for the build.sh and run.sh scripts.

+# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
 if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
+
+    # Activate virtual environment if not already activated and exists
+    if [[ -z $VIRTUAL_ENV ]]; then
+        [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
+            && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
+            && echo "Activated virtual environment: $VIRTUAL_ENV"
+    fi
+
     # Decide which container flavor to build if not specified
     if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
         # Check for CUDA and ROCm
         CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
         ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
-        if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
+        if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR="cuda"
-        elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
+        elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
            CONTAINER_FLAVOR="rocm"
         else
            CONTAINER_FLAVOR="cpu"
         fi
     fi
+
     # Set PIP_EXTRA_INDEX_URL based on container flavor
     if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
         PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"

@@ -26,9 +38,10 @@ fi
 # Variables shared by build.sh and run.sh
 REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
-VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
+REPOSITORY_NAME="${REPOSITORY_NAME,,}"
+VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
 ARCH="${ARCH-$(uname -m)}"
-PLATFORM="${PLATFORM-Linux/${ARCH}}"
+PLATFORM="${PLATFORM-linux/${ARCH}}"
 INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
 CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
 CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
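For orientation, the flavor detection that this script performs through `python -c` one-liners can be written as a single Python function. This is an illustrative sketch, not part of the commit; note that with the `uname -s != Darwin` guard removed, only torch's own reports decide the flavor:

```python
# Illustrative Python sketch of env.sh's container-flavor detection.
# Assumes torch may or may not be installed, mirroring the shell guard.
def detect_container_flavor() -> str:
    try:
        import torch
    except ImportError:
        # env.sh likewise skips detection when `python -c "import torch"` fails
        return "cpu"
    if torch.cuda.is_available():
        return "cuda"
    if torch.version.hip is not None:
        return "rocm"
    return "cpu"

if __name__ == "__main__":
    print(detect_container_flavor())
```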


@@ -1,14 +1,16 @@
 #!/usr/bin/env bash
 set -e

-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
-# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

 source ./env.sh

+# Create outputs directory if it does not exist
+[[ -d ./outputs ]] || mkdir ./outputs

 echo -e "You are using these values:\n"
 echo -e "Volumename:\t${VOLUMENAME}"
 echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"

@@ -22,10 +24,18 @@ docker run \
     --name="${REPOSITORY_NAME,,}" \
     --hostname="${REPOSITORY_NAME,,}" \
     --mount=source="${VOLUMENAME}",target=/data \
-    ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
+    --mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
     ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
     ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
     --publish=9090:9090 \
     --cap-add=sys_nice \
     ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
-    "${CONTAINER_IMAGE}" ${1:+$@}
+    "${CONTAINER_IMAGE}" ${@:+$@}

+# Remove Trash folder
+for f in outputs/.Trash*; do
+    if [ -e "$f" ]; then
+        rm -Rf "$f"
+        break
+    fi
+done


@@ -30,25 +30,35 @@ Installation](010_INSTALL_AUTOMATED.md), and in many cases will
 already be installed (if, for example, you have used your system for
 gaming):

-* **Python** version 3.9 or 3.10 (3.11 is not recommended).
-* **CUDA Tools** For those with _NVidia GPUs_, you will need to
-  install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
-* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
-  to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
-  InvokeAI does not support AMD GPUs on Windows systems due to
-  lack of a Windows ROCm library.
-* **Visual C++ Libraries** _Windows users_ must install the free
-  [Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
-* **The Xcode command line tools** for _Macintosh users_. Instructions are
-  available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
-* _Macintosh users_ may also need to run the `Install Certificates` command
-  if model downloads give lots of certificate errors. Run:
-  `/Applications/Python\ 3.10/Install\ Certificates.command`
+* **Python**
+
+    version 3.9 or 3.10 (3.11 is not recommended).
+
+* **CUDA Tools**
+
+    For those with _NVidia GPUs_, you will need to
+    install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
+
+* **ROCm Tools**
+
+    For _Linux users with AMD GPUs_, you will need
+    to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
+    InvokeAI does not support AMD GPUs on Windows systems due to
+    lack of a Windows ROCm library.
+
+* **Visual C++ Libraries**
+
+    _Windows users_ must install the free
+    [Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
+
+* **The Xcode command line tools**
+
+    for _Macintosh users_. Instructions are available at
+    [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
+
+* _Macintosh users_ may also need to run the `Install Certificates` command
+  if model downloads give lots of certificate errors. Run:
+  `/Applications/Python\ 3.10/Install\ Certificates.command`

 ### Installation Walkthrough

@@ -75,7 +85,7 @@ manager, please follow these steps:
     === "Linux/Mac"

         ```bash
-        export INVOKEAI_ROOT="~/invokeai"
+        export INVOKEAI_ROOT=~/invokeai
         mkdir $INVOKEAI_ROOT
         ```

@@ -99,35 +109,30 @@ manager, please follow these steps:
     Windows environment variable using the Advanced System Settings dialogue.
     Refer to your operating system documentation for details.

-    === "Linux/Mac"
-
-        ```bash
-        cd $INVOKEAI_ROOT
-        python -m venv create .venv
-        ```
-
-    === "Windows"
-
-        ```bash
-        cd $INVOKEAI_ROOT
-        python -m venv create .venv
-        ```
+    ```terminal
+    cd $INVOKEAI_ROOT
+    python -m venv .venv --prompt InvokeAI
+    ```

 4. Activate the new environment:

     === "Linux/Mac"

         ```bash
         source .venv/bin/activate
         ```

     === "Windows"

-        ```bash
-        .venv\script\activate
-        ```
+        ```ps
+        .venv\Scripts\activate
+        ```

-    If you get a permissions error at this point, run the command
-    `Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
-    and try `activate` again.
+        If you get a permissions error at this point, run this command and try again
+        `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`

-    The command-line prompt should change to show `(.venv)` at the
+    The command-line prompt should change to show `(InvokeAI)` at the
     beginning of the prompt. Note that all the following steps should be
     run while inside the INVOKEAI_ROOT directory

@@ -137,40 +142,47 @@ manager, please follow these steps:
     python -m pip install --upgrade pip
     ```

-6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among CUDA, ROCm and CPU/MPS drivers as shown below:
+6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
+   CUDA, ROCm and CPU/MPS drivers as shown below:

     === "CUDA (NVidia)"

         ```bash
         pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
         ```

     === "ROCm (AMD)"

         ```bash
         pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
         ```

     === "CPU (Intel Macs & non-GPU systems)"

         ```bash
         pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
         ```

     === "MPS (M1 and M2 Macs)"

         ```bash
-        pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
+        pip install InvokeAI --use-pep517
         ```

 7. Deactivate and reactivate your runtime directory so that the invokeai-specific commands
    become available in the environment

     === "Linux/Macintosh"

         ```bash
         deactivate && source .venv/bin/activate
         ```

     === "Windows"

-        ```bash
+        ```ps
         deactivate
         .venv\Scripts\activate
         ```

@@ -179,7 +191,7 @@ manager, please follow these steps:
     models, model config files, directory for textual inversion embeddings, and
     your outputs.

-    ```bash
+    ```terminal
     invokeai-configure
     ```

@@ -283,13 +295,12 @@ on your system, please see the [Git Installation
 Guide](https://github.com/git-guides/install-git)

 1. From the command line, run this command:

     ```bash
     git clone https://github.com/invoke-ai/InvokeAI.git
     ```

     This will create a directory named `InvokeAI` and populate it with the
     full source code from the InvokeAI repository.

 2. Activate the InvokeAI virtual environment as per step (4) of the manual
 installation protocol (important!)

@@ -314,7 +325,7 @@ installation protocol (important!)
     === "MPS (M1 and M2 Macs)"

         ```bash
-        pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
+        pip install -e . --use-pep517
         ```

     Be sure to pass `-e` (for an editable install) and don't forget the

@@ -330,5 +341,29 @@ installation protocol (important!)
 repository. You can then use GitHub functions to create and submit
 pull requests to contribute improvements to the project.

-Please see [Contributing](/index.md#Contributing) for hints
+Please see [Contributing](../index.md#contributing) for hints
 on getting started.
+
+### Unsupported Conda Install
+
+Congratulations, you found the "secret" Conda installation
+instructions. If you really **really** want to use Conda with InvokeAI
+you can do so using this unsupported recipe:
+
+```
+mkdir ~/invokeai
+conda create -n invokeai python=3.10
+conda activate invokeai
+pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+invokeai-configure --root ~/invokeai
+invokeai --root ~/invokeai --web
+```
+
+The `pip install` command shown in this recipe is for Linux/Windows
+systems with an NVIDIA GPU. See step (6) above for the command to use
+with other platforms/GPU combinations. If you don't wish to pass the
+`--root` argument to `invokeai` with each launch, you may set the
+environment variable INVOKEAI_ROOT to point to the installation directory.
+
+Note that if you run into problems with the Conda installation, the InvokeAI
+staff will **not** be able to help you out. Caveat Emptor!


@@ -23,9 +23,11 @@ We thank them for all of their time and hard work.
 * @damian0815 - Attention Systems and Gameplay Engineer
 * @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
 * @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
-* @tildebyte - general gadfly and resident (self-appointed) know-it-all
+* @tildebyte - General gadfly and resident (self-appointed) know-it-all
 * @keturn - Lead for Diffusers port
 * @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
+* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
+* @genomancer (Gregg Helt) - Model training and merging

 ## **Contributions by**


@@ -680,7 +680,8 @@ class InvokeAIWebServer:
                     image = self.esrgan.process(
                         image=image,
                         upsampler_scale=postprocessing_parameters["upscale"][0],
-                        strength=postprocessing_parameters["upscale"][1],
+                        denoise_str=postprocessing_parameters["upscale"][1],
+                        strength=postprocessing_parameters["upscale"][2],
                         seed=seed,
                     )
                 elif postprocessing_parameters["type"] == "gfpgan":

@@ -1064,6 +1065,7 @@ class InvokeAIWebServer:
                 image = self.esrgan.process(
                     image=image,
                     upsampler_scale=esrgan_parameters["level"],
+                    denoise_str=esrgan_parameters['denoise_str'],
                     strength=esrgan_parameters["strength"],
                     seed=seed,
                 )

@@ -1071,6 +1073,7 @@ class InvokeAIWebServer:
                 postprocessing = True
                 all_parameters["upscale"] = [
                     esrgan_parameters["level"],
                     esrgan_parameters['denoise_str'],
                     esrgan_parameters["strength"],
                 ]

@@ -1287,7 +1290,8 @@ class InvokeAIWebServer:
                     {
                         "type": "esrgan",
                         "scale": int(parameters["upscale"][0]),
-                        "strength": float(parameters["upscale"][1]),
+                        "denoise_str": int(parameters["upscale"][1]),
+                        "strength": float(parameters["upscale"][2]),
                     }
                 )

@@ -1361,7 +1365,8 @@ class InvokeAIWebServer:
             if parameters["type"] == "esrgan":
                 postprocessing_metadata["type"] = "esrgan"
                 postprocessing_metadata["scale"] = parameters["upscale"][0]
-                postprocessing_metadata["strength"] = parameters["upscale"][1]
+                postprocessing_metadata["denoise_str"] = parameters["upscale"][1]
+                postprocessing_metadata["strength"] = parameters["upscale"][2]
             elif parameters["type"] == "gfpgan":
                 postprocessing_metadata["type"] = "gfpgan"
                 postprocessing_metadata["strength"] = parameters["facetool_strength"]


@@ -31,6 +31,7 @@ module.exports = {
     'space-before-blocks': 'error',
     'import/prefer-default-export': 'off',
     '@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
+    'prettier/prettier': ['error', { endOfLine: 'auto' }],
   },
   settings: {
     react: {

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -5,8 +5,8 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index-8606d352.js"></script>
-    <link rel="stylesheet" href="./assets/index-b0bf79f4.css">
+    <script type="module" crossorigin src="./assets/index-ad762ffd.js"></script>
+    <link rel="stylesheet" href="./assets/index-fecb6dd4.css">
   </head>

   <body>


@@ -20,6 +20,7 @@
     "upscaling": "Upscaling",
     "upscale": "Upscale",
     "upscaleImage": "Upscale Image",
+    "denoisingStrength": "Denoising Strength",
     "scale": "Scale",
     "otherOptions": "Other Options",
     "seamlessTiling": "Seamless Tiling",


@@ -20,6 +20,7 @@
     "upscaling": "Upscaling",
     "upscale": "Upscale",
     "upscaleImage": "Upscale Image",
+    "denoisingStrength": "Denoising Strength",
     "scale": "Scale",
     "otherOptions": "Other Options",
     "seamlessTiling": "Seamless Tiling",


@@ -92,6 +92,7 @@ export declare type ESRGANMetadata = CommonPostProcessedImageMetadata & {
   type: 'esrgan';
   scale: 2 | 4;
   strength: number;
+  denoise_str: number;
 };

 export declare type FacetoolMetadata = CommonPostProcessedImageMetadata & {


@@ -93,11 +93,15 @@ const makeSocketIOEmitters = (
       dispatch(setIsProcessing(true));

       const {
-        postprocessing: { upscalingLevel, upscalingStrength },
+        postprocessing: {
+          upscalingLevel,
+          upscalingDenoising,
+          upscalingStrength,
+        },
       } = getState();

       const esrganParameters = {
-        upscale: [upscalingLevel, upscalingStrength],
+        upscale: [upscalingLevel, upscalingDenoising, upscalingStrength],
       };

       socketio.emit('runPostprocessing', imageToProcess, {
         type: 'esrgan',


@@ -69,6 +69,7 @@ export type BackendGenerationParameters = {
 export type BackendEsrGanParameters = {
   level: UpscalingLevel;
+  denoise_str: number;
   strength: number;
 };

@@ -111,13 +112,12 @@ export const frontendToBackendParameters = (
     shouldRunFacetool,
     upscalingLevel,
     upscalingStrength,
+    upscalingDenoising,
   } = postprocessingState;

   const {
     cfgScale,
     height,
     img2imgStrength,
     infillMethod,
     initialImage,

@@ -136,11 +136,9 @@
     shouldFitToWidthHeight,
     shouldGenerateVariations,
     shouldRandomizeSeed,
     steps,
     threshold,
     tileSize,
     variationAmount,
     width,
   } = generationState;

@@ -190,6 +188,7 @@
   if (shouldRunESRGAN) {
     esrganParameters = {
       level: upscalingLevel,
+      denoise_str: upscalingDenoising,
       strength: upscalingStrength,
     };
   }


@@ -34,6 +34,7 @@ import {
   setFacetoolStrength,
   setFacetoolType,
   setHiresFix,
+  setUpscalingDenoising,
   setUpscalingLevel,
   setUpscalingStrength,
 } from 'features/parameters/store/postprocessingSlice';

@@ -147,11 +148,11 @@
       postprocessing,
       prompt,
       sampler,
-      scale,
       seamless,
       seed,
       steps,
       strength,
+      denoise_str,
       threshold,
       type,
       variations,

@@ -184,27 +185,6 @@
         {['esrgan', 'gfpgan'].includes(type) && (
           <MetadataItem label="Original image" value={orig_path} />
         )}
-        {type === 'gfpgan' && strength !== undefined && (
-          <MetadataItem
-            label="Fix faces strength"
-            value={strength}
-            onClick={() => dispatch(setFacetoolStrength(strength))}
-          />
-        )}
-        {type === 'esrgan' && scale !== undefined && (
-          <MetadataItem
-            label="Upscaling scale"
-            value={scale}
-            onClick={() => dispatch(setUpscalingLevel(scale))}
-          />
-        )}
-        {type === 'esrgan' && strength !== undefined && (
-          <MetadataItem
-            label="Upscaling strength"
-            value={strength}
-            onClick={() => dispatch(setUpscalingStrength(strength))}
-          />
-        )}
         {prompt && (
           <MetadataItem
             label="Prompt"

@@ -331,7 +311,7 @@
                     i: number
                   ) => {
                     if (postprocess.type === 'esrgan') {
-                      const { scale, strength } = postprocess;
+                      const { scale, strength, denoise_str } = postprocess;
                       return (
                         <Flex
                           key={i}

@@ -354,6 +334,15 @@
                               dispatch(setUpscalingStrength(strength))
                             }
                           />
+                          {denoise_str !== undefined && (
+                            <MetadataItem
+                              label="Denoising strength"
+                              value={denoise_str}
+                              onClick={() =>
+                                dispatch(setUpscalingDenoising(denoise_str))
+                              }
+                            />
+                          )}
                         </Flex>
                       );
                     } else if (postprocess.type === 'gfpgan') {


@@ -1,5 +0,0 @@
-.upscale-settings {
-  display: grid;
-  grid-template-columns: auto 1fr;
-  column-gap: 1rem;
-}


@@ -1,6 +1,7 @@
 import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import {
+  setUpscalingDenoising,
   setUpscalingLevel,
   setUpscalingStrength,
   UpscalingLevel,

@@ -8,20 +9,25 @@ import {
 import { createSelector } from '@reduxjs/toolkit';
 import { UPSCALING_LEVELS } from 'app/constants';
-import IAINumberInput from 'common/components/IAINumberInput';
 import IAISelect from 'common/components/IAISelect';
 import { postprocessingSelector } from 'features/parameters/store/postprocessingSelectors';
 import { systemSelector } from 'features/system/store/systemSelectors';
 import { isEqual } from 'lodash';
 import { ChangeEvent } from 'react';
 import { useTranslation } from 'react-i18next';
+import IAISlider from 'common/components/IAISlider';
+import { Flex } from '@chakra-ui/react';

 const parametersSelector = createSelector(
   [postprocessingSelector, systemSelector],
-  ({ upscalingLevel, upscalingStrength }, { isESRGANAvailable }) => {
+  (
+    { upscalingLevel, upscalingStrength, upscalingDenoising },
+    { isESRGANAvailable }
+  ) => {
     return {
       upscalingLevel,
+      upscalingDenoising,
       upscalingStrength,
       isESRGANAvailable,
     };

@@ -38,8 +44,12 @@
  */
 const UpscaleSettings = () => {
   const dispatch = useAppDispatch();
-  const { upscalingLevel, upscalingStrength, isESRGANAvailable } =
-    useAppSelector(parametersSelector);
+  const {
+    upscalingLevel,
+    upscalingStrength,
+    upscalingDenoising,
+    isESRGANAvailable,
+  } = useAppSelector(parametersSelector);

   const { t } = useTranslation();

@@ -49,7 +59,7 @@
   const handleChangeStrength = (v: number) => dispatch(setUpscalingStrength(v));

   return (
-    <div className="upscale-settings">
+    <Flex flexDir="column" rowGap="1rem" minWidth="20rem">
       <IAISelect
         isDisabled={!isESRGANAvailable}
         label={t('parameters:scale')}

@@ -57,17 +67,39 @@
         onChange={handleChangeLevel}
         validValues={UPSCALING_LEVELS}
       />
-      <IAINumberInput
-        isDisabled={!isESRGANAvailable}
-        label={t('parameters:strength')}
-        step={0.05}
+      <IAISlider
+        label={t('parameters:denoisingStrength')}
+        value={upscalingDenoising}
         min={0}
         max={1}
-        onChange={handleChangeStrength}
-        value={upscalingStrength}
-        isInteger={false}
+        step={0.01}
+        onChange={(v) => {
+          dispatch(setUpscalingDenoising(v));
+        }}
+        handleReset={() => dispatch(setUpscalingDenoising(0.75))}
+        withSliderMarks
+        withInput
+        withReset
+        isSliderDisabled={!isESRGANAvailable}
+        isInputDisabled={!isESRGANAvailable}
+        isResetDisabled={!isESRGANAvailable}
       />
-    </div>
+      <IAISlider
+        label={`${t('parameters:upscale')} ${t('parameters:strength')}`}
+        value={upscalingStrength}
+        min={0}
+        max={1}
+        step={0.05}
+        onChange={handleChangeStrength}
+        handleReset={() => dispatch(setUpscalingStrength(0.75))}
+        withSliderMarks
+        withInput
+        withReset
+        isSliderDisabled={!isESRGANAvailable}
+        isInputDisabled={!isESRGANAvailable}
+        isResetDisabled={!isESRGANAvailable}
+      />
+    </Flex>
   );
 };


@@ -195,8 +195,11 @@ export const generationSlice = createSlice({
       } else {
         state.threshold = threshold;
       }
-      if (perlin) state.perlin = perlin;
-      if (typeof perlin === 'undefined') state.perlin = 0;
+      if (typeof perlin === 'undefined') {
+        state.perlin = 0;
+      } else {
+        state.perlin = perlin;
+      }
       if (typeof seamless === 'boolean') state.seamless = seamless;
       // if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix; // TODO: Needs to be fixed after reorg
       if (width) state.width = width;

@@ -268,10 +271,16 @@ export const generationSlice = createSlice({
       if (sampler) state.sampler = sampler;
       if (steps) state.steps = steps;
       if (cfg_scale) state.cfgScale = cfg_scale;
-      if (threshold) state.threshold = threshold;
-      if (typeof threshold === 'undefined') state.threshold = 0;
-      if (perlin) state.perlin = perlin;
-      if (typeof perlin === 'undefined') state.perlin = 0;
+      if (typeof threshold === 'undefined') {
+        state.threshold = 0;
+      } else {
+        state.threshold = threshold;
+      }
+      if (typeof perlin === 'undefined') {
+        state.perlin = 0;
+      } else {
+        state.perlin = perlin;
+      }
       if (typeof seamless === 'boolean') state.seamless = seamless;
       // if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix; // TODO: Needs to be fixed after reorg
       if (width) state.width = width;
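Both rewrites in this file fix the same falsy-value pitfall: the old `if (perlin) ...` guard skipped the assignment when the incoming value was a legitimate `0`, so a stale non-zero value could survive in the store. An illustrative Python analogue of the bug and the fix (not the project's code):

```python
# Python analogue of the falsy-zero pitfall fixed above; illustrative only.
def set_perlin_old(state, perlin):
    if perlin:              # 0 is falsy: a real value of 0 is silently ignored
        state["perlin"] = perlin
    if perlin is None:      # ~ `typeof perlin === 'undefined'`
        state["perlin"] = 0

def set_perlin_new(state, perlin):
    state["perlin"] = 0 if perlin is None else perlin

state = {"perlin": 0.4}
set_perlin_old(state, 0)
print(state)  # {'perlin': 0.4} -- the stale value survives
set_perlin_new(state, 0)
print(state)  # {'perlin': 0}
```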


@@ -16,6 +16,7 @@ export interface PostprocessingState {
   shouldRunESRGAN: boolean;
   shouldRunFacetool: boolean;
   upscalingLevel: UpscalingLevel;
+  upscalingDenoising: number;
   upscalingStrength: number;
 }

@@ -29,6 +30,7 @@
   shouldRunESRGAN: false,
   shouldRunFacetool: false,
   upscalingLevel: 4,
+  upscalingDenoising: 0.75,
   upscalingStrength: 0.75,
 };

@@ -47,6 +49,9 @@
     setUpscalingLevel: (state, action: PayloadAction<UpscalingLevel>) => {
       state.upscalingLevel = action.payload;
     },
+    setUpscalingDenoising: (state, action: PayloadAction<number>) => {
+      state.upscalingDenoising = action.payload;
+    },
     setUpscalingStrength: (state, action: PayloadAction<number>) => {
       state.upscalingStrength = action.payload;
     },

@@ -88,6 +93,7 @@
   setShouldRunESRGAN,
   setShouldRunFacetool,
   setUpscalingLevel,
+  setUpscalingDenoising,
   setUpscalingStrength,
 } = postprocessingSlice.actions;


@@ -27,7 +27,6 @@
 @use '../features/parameters/components/ProcessButtons/ProcessButtons.scss';
 @use '../features/parameters/components/MainParameters/MainParameters.scss';
 @use '../features/parameters/components/AccordionItems/AdvancedSettings.scss';
-@use '../features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings.scss';
 @use '../features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.scss';

 // gallery

File diff suppressed because one or more lines are too long


@@ -321,6 +321,7 @@ class Generate:
             codeformer_fidelity = None,
             save_original = False,
             upscale = None,
+            upscale_denoise_str = 0.75,
             # this is specific to inpainting and causes more extreme inpainting
             inpaint_replace = 0.0,
             # This controls the size at which inpaint occurs (scaled up for inpaint, then back down for the result)

@@ -560,6 +561,7 @@ class Generate:
             if upscale is not None or facetool_strength > 0:
                 self.upscale_and_reconstruct(results,
                                              upscale = upscale,
+                                             upscale_denoise_str = upscale_denoise_str,
                                              facetool = facetool,
                                              strength = facetool_strength,
                                              codeformer_fidelity = codeformer_fidelity,

@@ -633,6 +635,7 @@ class Generate:
             facetool_strength = 0.0,
             codeformer_fidelity = 0.75,
             upscale = None,
+            upscale_denoise_str = 0.75,
             out_direction = None,
             outcrop = [],
             save_original = True, # to get new name

@@ -684,6 +687,7 @@ class Generate:
                 codeformer_fidelity = codeformer_fidelity,
                 save_original = save_original,
                 upscale = upscale,
+                upscale_denoise_str = upscale_denoise_str,
                 image_callback = callback,
                 prefix = prefix,
             )

@@ -952,6 +956,7 @@ class Generate:
                                image_list,
                                facetool = 'gfpgan',
                                upscale = None,
+                               upscale_denoise_str = 0.75,
                                strength = 0.0,
                                codeformer_fidelity = 0.75,
                                save_original = False,

@@ -982,7 +987,7 @@ class Generate:
                     if len(upscale) < 2:
                         upscale.append(0.75)
                     image = self.esrgan.process(
-                        image, upscale[1], seed, int(upscale[0]))
+                        image, upscale[1], seed, int(upscale[0]), denoise_str=upscale_denoise_str)
                 else:
                     print(">> ESRGAN is disabled. Image not upscaled.")
             except Exception as e:
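Note the backward-compatibility padding just before the `esrgan.process` call above: an `upscale` argument carrying only a scale factor is padded with a default strength of 0.75, while the new denoising value travels separately as `upscale_denoise_str`. A standalone sketch of that normalization (hypothetical helper, shown for clarity):

```python
# Hypothetical helper mirroring the padding logic in
# upscale_and_reconstruct above: a bare [scale] gets a default strength.
def normalize_upscale(upscale, default_strength=0.75):
    upscale = list(upscale)
    if len(upscale) < 2:
        upscale.append(default_strength)
    return upscale

assert normalize_upscale([4]) == [4, 0.75]
assert normalize_upscale([2, 0.5]) == [2, 0.5]
```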


@@ -58,12 +58,9 @@ def main():
     print(f'>> Internet connectivity is {Globals.internet_available}')
     if not args.conf:
-        if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
-            report_model_error(opt, e)
-#            print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
-#            print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
-#            print('** This script will now exit.')
-#            sys.exit(-1)
+        config_file = os.path.join(Globals.root,'configs','models.yaml')
+        if not os.path.exists(config_file):
+            report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))
 
     print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -658,7 +655,9 @@ def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Opt
         model_description=default_description
     )
     config_file = None
-    default = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
+    default = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') \
+        if re.search('inpaint',default_name, flags=re.IGNORECASE) \
+        else Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
 
     completer.complete_extensions(('.yaml','.yml'))
     completer.set_line(str(default))
@@ -709,12 +708,21 @@ def _get_model_name_and_desc(model_manager,completer,model_name:str='',model_des
         model_description = input(f'Description for this model [{model_description}]: ').strip() or model_description
     return model_name, model_description
 
 
-def optimize_model(model_name_or_path:str, gen, opt, completer):
+def _is_inpainting(model_name_or_path: str)->bool:
+    if re.search('inpaint',model_name_or_path, flags=re.IGNORECASE):
+        return not input('Is this an inpainting model? [y] ').startswith(('n','N'))
+    else:
+        return not input('Is this an inpainting model? [n] ').startswith(('y','Y'))
+
+def optimize_model(model_name_or_path: str, gen, opt, completer):
     manager = gen.model_manager
     ckpt_path = None
     original_config_file = None
-    if (model_info := manager.model_info(model_name_or_path)):
+    if model_name_or_path == gen.model_name:
+        print("** Can't convert the active model. !switch to another model first. **")
+        return
+    elif (model_info := manager.model_info(model_name_or_path)):
         if 'weights' in model_info:
             ckpt_path = Path(model_info['weights'])
             original_config_file = Path(model_info['config'])
@@ -731,7 +739,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
             ckpt_path.stem,
             f'Converted model {ckpt_path.stem}'
         )
-        is_inpainting = input('Is this an inpainting model? [n] ').startswith(('y','Y'))
+        is_inpainting = _is_inpainting(model_name_or_path)
         original_config_file = Path(
             'configs',
             'stable-diffusion',
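
The new helper keys the confirmation prompt's default answer off the model name; a quick sketch of the two paths, with hypothetical file names:

    # name matches 'inpaint' -> prompt reads 'Is this an inpainting model? [y] '
    _is_inpainting('sd-v1-5-inpainting.ckpt')
    # no match -> prompt reads 'Is this an inpainting model? [n] '
    _is_inpainting('analog-diffusion-1.0.ckpt')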
@@ -889,6 +897,7 @@ def do_postprocess (gen, opt, callback):
         codeformer_fidelity = opt.codeformer_fidelity,
         save_original = opt.save_original,
         upscale = opt.upscale,
+        upscale_denoise_str = opt.esrgan_denoise_str,
         out_direction = opt.out_direction,
         outcrop = opt.outcrop,
         callback = callback,
@@ -950,7 +959,7 @@ def prepare_image_metadata(
         print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
         filename = f'{prefix}.{seed}.png'
     except IndexError:
-        print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
+        print("** The filename format is broken or complete. Will use '{prefix}.{seed}.png' instead")
         filename = f'{prefix}.{seed}.png'
 
     if opt.variation_amount > 0:

View File

@@ -1 +1 @@
-__version__='2.3.0-rc7'
+__version__='2.3.0'

View File

@@ -671,6 +671,12 @@ class Args(object):
             default=400,
             help='Tile size for background sampler, 0 for no tile during testing. Default: 400.',
         )
+        postprocessing_group.add_argument(
+            '--esrgan_denoise_str',
+            type=float,
+            default=0.75,
+            help='esrgan denoise str. 0 is no denoise, 1 is max denoise. Default: 0.75',
+        )
         postprocessing_group.add_argument(
             '--gfpgan_model_path',
             type=str,
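
With the argument registered, the denoise strength should be settable from the command line, e.g. (assuming the standard invokeai entry point):

    invokeai --esrgan_denoise_str 0.5    # softer denoising than the 0.75 default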

View File

@@ -128,7 +128,7 @@ script do it for you. Manual installation is described at:
 
 https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/
 
 You may download the recommended models (about 15GB total), install all models (40 GB!!)
 select a customized set, or completely skip this step.
 """
 )
@@ -583,7 +583,7 @@ def new_config_file_contents(successfully_downloaded: dict, config_file: Path, o
     # model is a diffusers (indicated with a path)
     if conf.get(model) and Path(successfully_downloaded[model]).is_dir():
         offer_to_delete_weights(model, conf[model], opt.yes_to_all)
 
     stanza = {}
     mod = Datasets[model]
     stanza["description"] = mod["description"]
@@ -635,7 +635,7 @@ def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool
             weights.unlink()
         except OSError as e:
             print(str(e))
 
 # ---------------------------------------------
 # this will preload the Bert tokenizer fles
 def download_bert():
@@ -683,10 +683,18 @@ def download_clip():
 def download_realesrgan():
     print("Installing models from RealESRGAN...", file=sys.stderr)
     model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth"
+    wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"
+
     model_dest = os.path.join(
         Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
     )
+
+    wdn_model_dest = os.path.join(
+        Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
+    )
+
     download_with_progress_bar(model_url, model_dest, "RealESRGAN")
+    download_with_progress_bar(wdn_model_url, wdn_model_dest, "RealESRGANwdn")
 
 
 def download_gfpgan():
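
The second download is the weight-denoising (-wdn) twin of the general x4v3 checkpoint; the ESRGAN loader below blends the two sets of weights, so both files must be present locally for the denoise control to take effect.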

View File

@@ -16,7 +16,7 @@ class ESRGAN():
         else:
             use_half_precision = True
 
-    def load_esrgan_bg_upsampler(self):
+    def load_esrgan_bg_upsampler(self, denoise_str):
         if not torch.cuda.is_available():  # CPU or MPS on M1
             use_half_precision = False
         else:
@@ -26,14 +26,16 @@ class ESRGAN():
         from realesrgan import RealESRGANer
 
         model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
-        model_path = os.path.join(Globals.root,'models/realesrgan/realesr-general-x4v3.pth')
+        model_path = os.path.join(Globals.root, 'models/realesrgan/realesr-general-x4v3.pth')
+        wdn_model_path = os.path.join(Globals.root, 'models/realesrgan/realesr-general-wdn-x4v3.pth')
         scale = 4
 
         bg_upsampler = RealESRGANer(
             scale=scale,
-            model_path=model_path,
+            model_path=[model_path, wdn_model_path],
             model=model,
             tile=self.bg_tile_size,
+            dni_weight=[denoise_str, 1 - denoise_str],
             tile_pad=10,
             pre_pad=0,
             half=use_half_precision,
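
Passing a pair of model paths together with dni_weight enables Deep Network Interpolation: RealESRGANer loads both checkpoints and linearly blends their parameters before inference. A minimal sketch of the idea (hypothetical helper, not InvokeAI code, assuming both checkpoints keep their weights under a 'params' key as the Real-ESRGAN releases do):

    import torch

    def dni_blend(path_a: str, path_b: str, alpha: float) -> dict:
        # blend two checkpoints: weight alpha on the first model, (1 - alpha) on the second
        net_a = torch.load(path_a, map_location='cpu')['params']
        net_b = torch.load(path_b, map_location='cpu')['params']
        return {k: alpha * v + (1 - alpha) * net_b[k] for k, v in net_a.items()}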
@@ -41,13 +43,13 @@ class ESRGAN():
 
         return bg_upsampler
 
-    def process(self, image: ImageType, strength: float, seed: str = None, upsampler_scale: int = 2):
+    def process(self, image: ImageType, strength: float, seed: str = None, upsampler_scale: int = 2, denoise_str: float = 0.75):
         with warnings.catch_warnings():
             warnings.filterwarnings('ignore', category=DeprecationWarning)
             warnings.filterwarnings('ignore', category=UserWarning)
 
             try:
-                upsampler = self.load_esrgan_bg_upsampler()
+                upsampler = self.load_esrgan_bg_upsampler(denoise_str)
             except Exception:
                 import traceback
                 import sys
@@ -60,7 +62,7 @@ class ESRGAN():
         if seed is not None:
             print(
-                f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
+                f'>> Real-ESRGAN Upscaling seed:{seed}, scale:{upsampler_scale}x, tile:{self.bg_tile_size}, denoise:{denoise_str}'
             )
 
         # ESRGAN outputs images with partial transparency if given RGBA images; convert to RGB
         image = image.convert("RGB")
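
End to end, the reworked path can be exercised directly; a hypothetical snippet (import path and constructor argument assumed from context):

    from PIL import Image
    from ldm.invoke.restoration.realesrgan import ESRGAN  # import path assumed

    esrgan = ESRGAN(bg_tile_size=400)
    image = Image.open('sample.png')
    # 0.75 = ESRGAN strength, 2x upscale; denoise_str=0.5 gives a 50/50 blend of base and -wdn weights
    result = esrgan.process(image, 0.75, seed=42, upsampler_scale=2, denoise_str=0.5)
    result.save('sample.upscaled.png')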