Merge branch 'main' into update/ci/prepare-test-invoke-pip-for-queue

Lincoln Stein 2023-02-11 22:38:55 -05:00 committed by GitHub
commit 58a1d9aae0
34 changed files with 572 additions and 305 deletions


@@ -3,21 +3,23 @@
!invokeai
!ldm
!pyproject.toml
!README.md
# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt
# ignore frontend but whitelist dist
invokeai/frontend/**
!invokeai/frontend/dist
invokeai/frontend/
!invokeai/frontend/dist/
# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets
!invokeai/assets/web
invokeai/assets/
!invokeai/assets/web/
# ignore python cache
**/__pycache__
# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]
**/*.egg-info
# Distribution / packaging
*.egg-info/
*.egg


@@ -3,7 +3,8 @@ on:
push:
branches:
- 'main'
- 'update/ci/*'
- 'update/ci/docker/*'
- 'update/docker/*'
tags:
- 'v*.*.*'
@@ -20,18 +21,15 @@ jobs:
include:
- flavor: amd
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cuda
pip-extra-index-url: ''
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cpu
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
runs-on: ubuntu-latest
name: ${{ matrix.flavor }}
env:
PLATFORMS: 'linux/amd64,linux/arm64'
DOCKERFILE: 'docker/Dockerfile'
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -41,7 +39,9 @@ jobs:
uses: docker/metadata-action@v4
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
images: ghcr.io/${{ github.repository }}
images: |
ghcr.io/${{ github.repository }}
${{ vars.DOCKERHUB_REPOSITORY }}
tags: |
type=ref,event=branch
type=ref,event=tag
@@ -52,13 +52,14 @@ jobs:
flavor: |
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: ${{ matrix.platforms }}
platforms: ${{ env.PLATFORMS }}
- name: Login to GitHub Container Registry
if: github.event_name != 'pull_request'
@@ -68,25 +69,34 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build container
id: docker_build
uses: docker/build-push-action@v4
with:
context: .
file: ${{ matrix.dockerfile }}
platforms: ${{ matrix.platforms }}
push: ${{ github.event_name != 'pull_request' }}
file: ${{ env.DOCKERFILE }}
platforms: ${{ env.PLATFORMS }}
push: ${{ github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
cache-from: type=gha
cache-to: type=gha,mode=max
cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
type=gha,scope=main-${{ matrix.flavor }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
- name: Output image, digest and metadata to summary
run: |
{
echo imageid: "${{ steps.docker_build.outputs.imageid }}"
echo digest: "${{ steps.docker_build.outputs.digest }}"
echo labels: "${{ steps.meta.outputs.labels }}"
echo tags: "${{ steps.meta.outputs.tags }}"
echo version: "${{ steps.meta.outputs.version }}"
} >> "$GITHUB_STEP_SUMMARY"
- name: Docker Hub Description
if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: ${{ vars.DOCKERHUB_REPOSITORY }}
short-description: ${{ github.event.repository.description }}

.gitignore

@@ -1,4 +1,5 @@
# ignore default image save location and model symbolic link
.idea/
embeddings/
outputs/
models/ldm/stable-diffusion-v1/model.ckpt
@@ -232,4 +233,4 @@ installer/update.bat
installer/update.sh
# no longer stored in source directory
models
models

README.md

@@ -1,6 +1,6 @@
<div align="center">
![project logo](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)
![project logo](https://github.com/invoke-ai/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)
# InvokeAI: A Stable Diffusion Toolkit
@@ -41,38 +41,136 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
<div align="center">
![canvas preview](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/canvas_preview.png)
![canvas preview](https://github.com/invoke-ai/InvokeAI/raw/main/docs/assets/canvas_preview.png)
</div>
# Getting Started with InvokeAI
## Table of Contents
1. [Quick Start](#getting-started-with-invokeai)
2. [Installation](#detailed-installation-instructions)
3. [Hardware Requirements](#hardware-requirements)
4. [Features](#features)
5. [Latest Changes](#latest-changes)
6. [Troubleshooting](#troubleshooting)
7. [Contributing](#contributing)
8. [Contributors](#contributors)
9. [Support](#support)
10. [Further Reading](#further-reading)
## Getting Started with InvokeAI
For full installation and upgrade instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
### Automatic Installer (suggested for 1st time users)
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
2. Download the .zip file for your OS (Windows/macOS/Linux).
3. Unzip the file.
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
5. Wait a while, until it is done.
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
8. Type `banana sushi` in the box on the top left and click `Invoke`
4. If you are on Windows, double-click on the `install.bat` script. On
macOS, open a Terminal window, drag the file `install.sh` from Finder
into the Terminal, and press return. On Linux, run `install.sh`.
## Table of Contents
5. You'll be asked to confirm the location of the folder in which
to install InvokeAI and its image generation model files. Pick a
location with at least 15 GB of free disk space. More if you plan on
installing lots of models.
1. [Installation](#installation)
2. [Hardware Requirements](#hardware-requirements)
3. [Features](#features)
4. [Latest Changes](#latest-changes)
5. [Troubleshooting](#troubleshooting)
6. [Contributing](#contributing)
7. [Contributors](#contributors)
8. [Support](#support)
9. [Further Reading](#further-reading)
6. Wait while the installer does its thing. After installing the software,
the installer will launch a script that lets you configure InvokeAI and
select a set of starting image generation models.
## Installation
7. Find the folder that InvokeAI was installed into (it is not the
same as the unpacked zip file directory!) The default location of this
folder (if you didn't change it in step 5) is `~/invokeai` on
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
8. On Windows systems, double-click on the `invoke.bat` file. On
macOS, open a Terminal window, drag `invoke.sh` from the folder into
the Terminal, and press return. On Linux, run `invoke.sh`
9. Press 2 to open the "browser-based UI", press enter/return, wait a
minute or two for Stable Diffusion to start up, then open your browser
and go to http://localhost:9090.
10. Type `banana sushi` in the box on the top left and click `Invoke`
### Command-Line Installation (for users familiar with Terminals)
You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
not supported.
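
Before starting, it can help to confirm the interpreter version; a minimal sketch of such a check (not part of the official steps):

```python
# Fail fast if the running Python is outside the supported 3.9-3.10 range.
import sys

if not ((3, 9) <= sys.version_info[:2] <= (3, 10)):
    raise SystemExit(f"Unsupported Python {sys.version.split()[0]}; use 3.9 or 3.10")
```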
1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
```terminal
mkdir invokeai
```
3. Create a virtual environment named `.venv` inside this directory and activate it:
```terminal
cd invokeai
python -m venv .venv --prompt InvokeAI
```
4. Activate the virtual environment (do it every time you run InvokeAI)
_For Linux/Mac users:_
```sh
source .venv/bin/activate
```
_For Windows users:_
```ps
.venv\Scripts\activate
```
5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
_For Windows/Linux with an NVIDIA GPU:_
```terminal
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
_For Linux with an AMD GPU:_
```sh
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
_For Macintoshes, either Intel or M1/M2:_
```sh
pip install InvokeAI --use-pep517
```
6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
```terminal
invokeai-configure
```
7. Launch the web server (do it every time you run InvokeAI):
```terminal
invokeai --web
```
8. Point your browser to http://localhost:9090 to bring up the web interface.
9. Type `banana sushi` in the box on the top left and click `Invoke`.
Be sure to activate the virtual environment each time before re-launching InvokeAI,
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
### Detailed Installation Instructions
This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
@@ -80,13 +178,13 @@ AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
### Hardware Requirements
## Hardware Requirements
InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).
#### System
### System
You will need one of the following:
@@ -98,11 +196,11 @@ We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
to render 512x512 images.
#### Memory
### Memory
- At least 12 GB Main Memory RAM.
#### Disk
### Disk
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
@@ -152,7 +250,7 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.
# Contributing
## Contributing
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.


@@ -1,57 +1,63 @@
# syntax=docker/dockerfile:1
ARG PYTHON_VERSION=3.9
##################
## base image ##
##################
FROM python:${PYTHON_VERSION}-slim AS python-base
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
# prepare for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
# Install necessary packages
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
&& apt-get install -y \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.* \
&& rm -rf /var/lib/apt/lists/*
libopencv-dev=4.5.*
# set working directory and path
# set working directory and env
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE 1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# don't fall back to legacy build system
ENV PIP_USE_PEP517=1
#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
ENV PIP_USE_PEP517=1
# prepare for buildkit cache
# Install dependencies
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.*
# prepare pip for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}
# Install dependencies
RUN \
--mount=type=cache,target=${PIP_CACHE_DIR} \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
apt-get update \
&& apt-get install \
-yqq \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.* \
&& rm -rf /var/lib/apt/lists/*
# create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
python3 -m venv "${APPNAME}" \
--upgrade-deps
@@ -61,9 +67,8 @@ COPY --link . .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
ARG PIP_PACKAGE=.
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
"${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
RUN --mount=type=cache,target=${PIP_CACHE_DIR},sharing=locked \
"${APPNAME}/bin/pip" install .
# build patchmatch
RUN python3 -c "from patchmatch import patch_match"
@@ -73,14 +78,26 @@ RUN python3 -c "from patchmatch import patch_match"
#####################
FROM python-base AS runtime
# setup environment
COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
# Create a new User
ARG UNAME=appuser
RUN useradd \
--no-log-init \
-m \
-U \
"${UNAME}"
# set Entrypoint and default CMD
# create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
&& chown -R "${UNAME}" "${VOLUME_DIR}"
# setup runtime environment
USER ${UNAME}
COPY --chown=${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]


@@ -1,19 +1,24 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
# as found on https://pytorch.org/get-started/locally/
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
# Possible Values are:
# - cpu
# - cuda
# - rocm
# Don't forget to also set it when executing run.sh
# if it is not set, the script will try to detect the flavor by itself.
#
# Doc can be found here:
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
# print the settings
echo -e "You are using these values:\n"
@@ -21,9 +26,10 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
# Create docker volume
@@ -36,8 +42,9 @@ fi
# Build Container
DOCKER_BUILDKIT=1 docker build \
--platform="${PLATFORM}" \
--tag="${CONTAINER_IMAGE}" \
--platform="${PLATFORM:-linux/amd64}" \
--tag="${CONTAINER_IMAGE:-invokeai}" \
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
--file="${DOCKERFILE}" \


@@ -1,19 +1,31 @@
#!/usr/bin/env bash
# This file is used to set environment variables for the build.sh and run.sh scripts.
# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
# Activate virtual environment if not already activated and exists
if [[ -z $VIRTUAL_ENV ]]; then
[[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
&& source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
&& echo "Activated virtual environment: $VIRTUAL_ENV"
fi
# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="cuda"
elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
CONTAINER_FLAVOR="cpu"
fi
fi
# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
@@ -26,9 +38,10 @@ fi
# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
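
The flavor detection above shells out to Python one-liners; the same decision expressed as one self-contained Python sketch (assuming a working `torch` install) looks like this:

```python
import torch

def detect_container_flavor() -> str:
    """Mirror of env.sh's detection: prefer CUDA, then ROCm, else CPU."""
    if torch.cuda.is_available():
        return "cuda"
    if torch.version.hip is not None:
        return "rocm"
    return "cpu"

print(detect_container_flavor())
```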


@@ -1,14 +1,16 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
SCRIPTDIR=$(dirname "$0")
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs
echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
@@ -22,10 +24,18 @@ docker run \
--name="${REPOSITORY_NAME,,}" \
--hostname="${REPOSITORY_NAME,,}" \
--mount=source="${VOLUMENAME}",target=/data \
${MODELSPATH:+-u "$(id -u):$(id -g)"} \
--mount type=bind,source="$(pwd)"/outputs,target=/data/outputs \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${1:+$@}
"${CONTAINER_IMAGE}" ${@:+$@}
# Remove Trash folder
for f in outputs/.Trash*; do
if [ -e "$f" ]; then
rm -Rf "$f"
break
fi
done


@@ -30,25 +30,35 @@ Installation](010_INSTALL_AUTOMATED.md), and in many cases will
already be installed (if, for example, you have used your system for
gaming):
* **Python** version 3.9 or 3.10 (3.11 is not recommended).
* **Python**
* **CUDA Tools** For those with _NVidia GPUs_, you will need to
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
version 3.9 or 3.10 (3.11 is not recommended).
* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
InvokeAI does not support AMD GPUs on Windows systems due to
lack of a Windows ROCm library.
* **CUDA Tools**
* **Visual C++ Libraries** _Windows users_ must install the free
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
For those with _NVidia GPUs_, you will need to
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
* **The Xcode command line tools** for _Macintosh users_. Instructions are
available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
* **ROCm Tools**
* _Macintosh users_ may also need to run the `Install Certificates` command
if model downloads give lots of certificate errors. Run:
`/Applications/Python\ 3.10/Install\ Certificates.command`
For _Linux users with AMD GPUs_, you will need
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
InvokeAI does not support AMD GPUs on Windows systems due to
lack of a Windows ROCm library.
* **Visual C++ Libraries**
_Windows users_ must install the free
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
* **The Xcode command line tools**
for _Macintosh users_. Instructions are available at
[Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
* _Macintosh users_ may also need to run the `Install Certificates` command
if model downloads give lots of certificate errors. Run:
`/Applications/Python\ 3.10/Install\ Certificates.command`
### Installation Walkthrough
@@ -75,7 +85,7 @@ manager, please follow these steps:
=== "Linux/Mac"
```bash
export INVOKEAI_ROOT="~/invokeai"
export INVOKEAI_ROOT=~/invokeai
mkdir $INVOKEAI_ROOT
```
@@ -99,35 +109,30 @@ manager, please follow these steps:
Windows environment variable using the Advanced System Settings dialogue.
Refer to your operating system documentation for details.
=== "Linux/Mac"
```bash
cd $INVOKEAI_ROOT
python -m venv create .venv
```
=== "Windows"
```bash
cd $INVOKEAI_ROOT
python -m venv create .venv
```
```terminal
cd $INVOKEAI_ROOT
python -m venv .venv --prompt InvokeAI
```
4. Activate the new environment:
=== "Linux/Mac"
```bash
```bash
source .venv/bin/activate
```
```
=== "Windows"
```bash
.venv\script\activate
```
If you get a permissions error at this point, run the command
`Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
and try `activate` again.
The command-line prompt should change to show `(.venv)` at the
```ps
.venv\Scripts\activate
```
If you get a permissions error at this point, run this command and try again
`Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
The command-line prompt should change to show `(InvokeAI)` at the
beginning of the prompt. Note that all the following steps should be
run while inside the INVOKEAI_ROOT directory
@@ -137,40 +142,47 @@ manager, please follow these steps:
python -m pip install --upgrade pip
```
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among CUDA, ROCm and CPU/MPS drivers as shown below:
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
CUDA, ROCm and CPU/MPS drivers as shown below:
=== "CUDA (NVidia)"
```bash
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```
=== "ROCm (AMD)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
```
=== "CPU (Intel Macs & non-GPU systems)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
```
=== "MPS (M1 and M2 Macs)"
```bash
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
pip install InvokeAI --use-pep517
```
7. Deactivate and reactivate your runtime directory so that the invokeai-specific commands
become available in the environment
=== "Linux/Macintosh"
```bash
deactivate && source .venv/bin/activate
```
=== "Windows"
```bash
```ps
deactivate
.venv\Scripts\activate
.venv\Scripts\activate
```
8. Set up the runtime directory
@@ -179,7 +191,7 @@ manager, please follow these steps:
models, model config files, directory for textual inversion embeddings, and
your outputs.
```bash
```terminal
invokeai-configure
```
@@ -283,13 +295,12 @@ on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git)
1. From the command line, run this command:
```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```
This will create a directory named `InvokeAI` and populate it with the
full source code from the InvokeAI repository.
This will create a directory named `InvokeAI` and populate it with the
full source code from the InvokeAI repository.
2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)
@@ -314,7 +325,7 @@ installation protocol (important!)
=== "MPS (M1 and M2 Macs)"
```bash
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
pip install -e . --use-pep517
```
Be sure to pass `-e` (for an editable install) and don't forget the
@@ -330,5 +341,29 @@ installation protocol (important!)
repository. You can then use GitHub functions to create and submit
pull requests to contribute improvements to the project.
Please see [Contributing](/index.md#Contributing) for hints
Please see [Contributing](../index.md#contributing) for hints
on getting started.
### Unsupported Conda Install
Congratulations, you found the "secret" Conda installation
instructions. If you really **really** want to use Conda with InvokeAI
you can do so using this unsupported recipe:
```
mkdir ~/invokeai
conda create -n invokeai python=3.10
conda activate invokeai
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
invokeai-configure --root ~/invokeai
invokeai --root ~/invokeai --web
```
The `pip install` command shown in this recipe is for Linux/Windows
systems with an NVIDIA GPU. See step (6) above for the command to use
with other platforms/GPU combinations. If you don't wish to pass the
`--root` argument to `invokeai` with each launch, you may set the
environment variable INVOKEAI_ROOT to point to the installation directory.
Note that if you run into problems with the Conda installation, the InvokeAI
staff will **not** be able to help you out. Caveat Emptor!


@@ -23,9 +23,11 @@ We thank them for all of their time and hard work.
* @damian0815 - Attention Systems and Gameplay Engineer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - general gadfly and resident (self-appointed) know-it-all
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
* @keturn - Lead for Diffusers port
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
* @genomancer (Gregg Helt) - Model training and merging
## **Contributions by**


@@ -680,7 +680,8 @@ class InvokeAIWebServer:
image = self.esrgan.process(
image=image,
upsampler_scale=postprocessing_parameters["upscale"][0],
strength=postprocessing_parameters["upscale"][1],
denoise_str=postprocessing_parameters["upscale"][1],
strength=postprocessing_parameters["upscale"][2],
seed=seed,
)
elif postprocessing_parameters["type"] == "gfpgan":
@@ -1064,6 +1065,7 @@ class InvokeAIWebServer:
image = self.esrgan.process(
image=image,
upsampler_scale=esrgan_parameters["level"],
denoise_str=esrgan_parameters['denoise_str'],
strength=esrgan_parameters["strength"],
seed=seed,
)
@@ -1071,6 +1073,7 @@ class InvokeAIWebServer:
postprocessing = True
all_parameters["upscale"] = [
esrgan_parameters["level"],
esrgan_parameters['denoise_str'],
esrgan_parameters["strength"],
]
@@ -1287,7 +1290,8 @@ class InvokeAIWebServer:
{
"type": "esrgan",
"scale": int(parameters["upscale"][0]),
"strength": float(parameters["upscale"][1]),
"denoise_str": int(parameters["upscale"][1]),
"strength": float(parameters["upscale"][2]),
}
)
@@ -1361,7 +1365,8 @@ class InvokeAIWebServer:
if parameters["type"] == "esrgan":
postprocessing_metadata["type"] = "esrgan"
postprocessing_metadata["scale"] = parameters["upscale"][0]
postprocessing_metadata["strength"] = parameters["upscale"][1]
postprocessing_metadata["denoise_str"] = parameters["upscale"][1]
postprocessing_metadata["strength"] = parameters["upscale"][2]
elif parameters["type"] == "gfpgan":
postprocessing_metadata["type"] = "gfpgan"
postprocessing_metadata["strength"] = parameters["facetool_strength"]


@@ -31,6 +31,7 @@ module.exports = {
'space-before-blocks': 'error',
'import/prefer-default-export': 'off',
'@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
'prettier/prettier': ['error', { endOfLine: 'auto' }],
},
settings: {
react: {

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -5,8 +5,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
<script type="module" crossorigin src="./assets/index-8606d352.js"></script>
<link rel="stylesheet" href="./assets/index-b0bf79f4.css">
<script type="module" crossorigin src="./assets/index-ad762ffd.js"></script>
<link rel="stylesheet" href="./assets/index-fecb6dd4.css">
</head>
<body>


@@ -20,6 +20,7 @@
"upscaling": "Upscaling",
"upscale": "Upscale",
"upscaleImage": "Upscale Image",
"denoisingStrength": "Denoising Strength",
"scale": "Scale",
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",


@@ -20,6 +20,7 @@
"upscaling": "Upscaling",
"upscale": "Upscale",
"upscaleImage": "Upscale Image",
"denoisingStrength": "Denoising Strength",
"scale": "Scale",
"otherOptions": "Other Options",
"seamlessTiling": "Seamless Tiling",


@@ -92,6 +92,7 @@ export declare type ESRGANMetadata = CommonPostProcessedImageMetadata & {
type: 'esrgan';
scale: 2 | 4;
strength: number;
denoise_str: number;
};
export declare type FacetoolMetadata = CommonPostProcessedImageMetadata & {


@@ -93,11 +93,15 @@ const makeSocketIOEmitters = (
dispatch(setIsProcessing(true));
const {
postprocessing: { upscalingLevel, upscalingStrength },
postprocessing: {
upscalingLevel,
upscalingDenoising,
upscalingStrength,
},
} = getState();
const esrganParameters = {
upscale: [upscalingLevel, upscalingStrength],
upscale: [upscalingLevel, upscalingDenoising, upscalingStrength],
};
socketio.emit('runPostprocessing', imageToProcess, {
type: 'esrgan',


@@ -69,6 +69,7 @@ export type BackendGenerationParameters = {
export type BackendEsrGanParameters = {
level: UpscalingLevel;
denoise_str: number;
strength: number;
};
@@ -111,13 +112,12 @@ export const frontendToBackendParameters = (
shouldRunFacetool,
upscalingLevel,
upscalingStrength,
upscalingDenoising,
} = postprocessingState;
const {
cfgScale,
height,
img2imgStrength,
infillMethod,
initialImage,
@@ -136,11 +136,9 @@
shouldFitToWidthHeight,
shouldGenerateVariations,
shouldRandomizeSeed,
steps,
threshold,
tileSize,
variationAmount,
width,
} = generationState;
@@ -190,6 +188,7 @@
if (shouldRunESRGAN) {
esrganParameters = {
level: upscalingLevel,
denoise_str: upscalingDenoising,
strength: upscalingStrength,
};
}


@@ -34,6 +34,7 @@ import {
setFacetoolStrength,
setFacetoolType,
setHiresFix,
setUpscalingDenoising,
setUpscalingLevel,
setUpscalingStrength,
} from 'features/parameters/store/postprocessingSlice';
@@ -147,11 +148,11 @@ const ImageMetadataViewer = memo(
postprocessing,
prompt,
sampler,
scale,
seamless,
seed,
steps,
strength,
denoise_str,
threshold,
type,
variations,
@@ -184,27 +185,6 @@
{['esrgan', 'gfpgan'].includes(type) && (
<MetadataItem label="Original image" value={orig_path} />
)}
{type === 'gfpgan' && strength !== undefined && (
<MetadataItem
label="Fix faces strength"
value={strength}
onClick={() => dispatch(setFacetoolStrength(strength))}
/>
)}
{type === 'esrgan' && scale !== undefined && (
<MetadataItem
label="Upscaling scale"
value={scale}
onClick={() => dispatch(setUpscalingLevel(scale))}
/>
)}
{type === 'esrgan' && strength !== undefined && (
<MetadataItem
label="Upscaling strength"
value={strength}
onClick={() => dispatch(setUpscalingStrength(strength))}
/>
)}
{prompt && (
<MetadataItem
label="Prompt"
@@ -331,7 +311,7 @@
i: number
) => {
if (postprocess.type === 'esrgan') {
const { scale, strength } = postprocess;
const { scale, strength, denoise_str } = postprocess;
return (
<Flex
key={i}
@@ -354,6 +334,15 @@
dispatch(setUpscalingStrength(strength))
}
/>
{denoise_str !== undefined && (
<MetadataItem
label="Denoising strength"
value={denoise_str}
onClick={() =>
dispatch(setUpscalingDenoising(denoise_str))
}
/>
)}
</Flex>
);
} else if (postprocess.type === 'gfpgan') {


@@ -1,5 +0,0 @@
.upscale-settings {
display: grid;
grid-template-columns: auto 1fr;
column-gap: 1rem;
}


@@ -1,6 +1,7 @@
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import {
setUpscalingDenoising,
setUpscalingLevel,
setUpscalingStrength,
UpscalingLevel,
@@ -8,20 +9,25 @@
import { createSelector } from '@reduxjs/toolkit';
import { UPSCALING_LEVELS } from 'app/constants';
import IAINumberInput from 'common/components/IAINumberInput';
import IAISelect from 'common/components/IAISelect';
import { postprocessingSelector } from 'features/parameters/store/postprocessingSelectors';
import { systemSelector } from 'features/system/store/systemSelectors';
import { isEqual } from 'lodash';
import { ChangeEvent } from 'react';
import { useTranslation } from 'react-i18next';
import IAISlider from 'common/components/IAISlider';
import { Flex } from '@chakra-ui/react';
const parametersSelector = createSelector(
[postprocessingSelector, systemSelector],
({ upscalingLevel, upscalingStrength }, { isESRGANAvailable }) => {
(
{ upscalingLevel, upscalingStrength, upscalingDenoising },
{ isESRGANAvailable }
) => {
return {
upscalingLevel,
upscalingDenoising,
upscalingStrength,
isESRGANAvailable,
};
@@ -38,8 +44,12 @@
*/
const UpscaleSettings = () => {
const dispatch = useAppDispatch();
const { upscalingLevel, upscalingStrength, isESRGANAvailable } =
useAppSelector(parametersSelector);
const {
upscalingLevel,
upscalingStrength,
upscalingDenoising,
isESRGANAvailable,
} = useAppSelector(parametersSelector);
const { t } = useTranslation();
@@ -49,7 +59,7 @@
const handleChangeStrength = (v: number) => dispatch(setUpscalingStrength(v));
return (
<div className="upscale-settings">
<Flex flexDir="column" rowGap="1rem" minWidth="20rem">
<IAISelect
isDisabled={!isESRGANAvailable}
label={t('parameters:scale')}
@@ -57,17 +67,39 @@
onChange={handleChangeLevel}
validValues={UPSCALING_LEVELS}
/>
<IAINumberInput
isDisabled={!isESRGANAvailable}
label={t('parameters:strength')}
step={0.05}
<IAISlider
label={t('parameters:denoisingStrength')}
value={upscalingDenoising}
min={0}
max={1}
onChange={handleChangeStrength}
value={upscalingStrength}
isInteger={false}
step={0.01}
onChange={(v) => {
dispatch(setUpscalingDenoising(v));
}}
handleReset={() => dispatch(setUpscalingDenoising(0.75))}
withSliderMarks
withInput
withReset
isSliderDisabled={!isESRGANAvailable}
isInputDisabled={!isESRGANAvailable}
isResetDisabled={!isESRGANAvailable}
/>
</div>
<IAISlider
label={`${t('parameters:upscale')} ${t('parameters:strength')}`}
value={upscalingStrength}
min={0}
max={1}
step={0.05}
onChange={handleChangeStrength}
handleReset={() => dispatch(setUpscalingStrength(0.75))}
withSliderMarks
withInput
withReset
isSliderDisabled={!isESRGANAvailable}
isInputDisabled={!isESRGANAvailable}
isResetDisabled={!isESRGANAvailable}
/>
</Flex>
);
};


@@ -195,8 +195,11 @@ export const generationSlice = createSlice({
} else {
state.threshold = threshold;
}
if (perlin) state.perlin = perlin;
if (typeof perlin === 'undefined') state.perlin = 0;
if (typeof perlin === 'undefined') {
state.perlin = 0;
} else {
state.perlin = perlin;
}
if (typeof seamless === 'boolean') state.seamless = seamless;
// if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix; // TODO: Needs to be fixed after reorg
if (width) state.width = width;
@@ -268,10 +271,16 @@ export const generationSlice = createSlice({
if (sampler) state.sampler = sampler;
if (steps) state.steps = steps;
if (cfg_scale) state.cfgScale = cfg_scale;
if (threshold) state.threshold = threshold;
if (typeof threshold === 'undefined') state.threshold = 0;
if (perlin) state.perlin = perlin;
if (typeof perlin === 'undefined') state.perlin = 0;
if (typeof threshold === 'undefined') {
state.threshold = 0;
} else {
state.threshold = threshold;
}
if (typeof perlin === 'undefined') {
state.perlin = 0;
} else {
state.perlin = perlin;
}
if (typeof seamless === 'boolean') state.seamless = seamless;
// if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix; // TODO: Needs to be fixed after reorg
if (width) state.width = width;
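
The rewritten branches above fix a falsy-zero bug: `if (perlin) state.perlin = perlin;` never runs when the restored value is `0`, leaving stale state behind. The same pitfall sketched in Python, as an analogy to the TypeScript fix:

```python
def restore_perlin(state: dict, perlin) -> None:
    # Buggy pattern: `if perlin:` skips the assignment when perlin == 0,
    # so whatever stale value is already in `state` survives.
    # Fixed pattern: only true absence (None) falls back to the default.
    state["perlin"] = 0 if perlin is None else perlin
```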


@@ -16,6 +16,7 @@ export interface PostprocessingState {
shouldRunESRGAN: boolean;
shouldRunFacetool: boolean;
upscalingLevel: UpscalingLevel;
upscalingDenoising: number;
upscalingStrength: number;
}
@@ -29,6 +30,7 @@ const initialPostprocessingState: PostprocessingState = {
shouldRunESRGAN: false,
shouldRunFacetool: false,
upscalingLevel: 4,
upscalingDenoising: 0.75,
upscalingStrength: 0.75,
};
@@ -47,6 +49,9 @@ export const postprocessingSlice = createSlice({
setUpscalingLevel: (state, action: PayloadAction<UpscalingLevel>) => {
state.upscalingLevel = action.payload;
},
setUpscalingDenoising: (state, action: PayloadAction<number>) => {
state.upscalingDenoising = action.payload;
},
setUpscalingStrength: (state, action: PayloadAction<number>) => {
state.upscalingStrength = action.payload;
},
@@ -88,6 +93,7 @@ export const {
setShouldRunESRGAN,
setShouldRunFacetool,
setUpscalingLevel,
setUpscalingDenoising,
setUpscalingStrength,
} = postprocessingSlice.actions;


@@ -27,7 +27,6 @@
@use '../features/parameters/components/ProcessButtons/ProcessButtons.scss';
@use '../features/parameters/components/MainParameters/MainParameters.scss';
@use '../features/parameters/components/AccordionItems/AdvancedSettings.scss';
@use '../features/parameters/components/AdvancedParameters/Upscale/UpscaleSettings.scss';
@use '../features/parameters/components/AdvancedParameters/Canvas/BoundingBox/BoundingBoxSettings.scss';
// gallery

File diff suppressed because one or more lines are too long


@@ -321,6 +321,7 @@ class Generate:
codeformer_fidelity = None,
save_original = False,
upscale = None,
upscale_denoise_str = 0.75,
# this is specific to inpainting and causes more extreme inpainting
inpaint_replace = 0.0,
# This controls the size at which inpaint occurs (scaled up for inpaint, then back down for the result)
@@ -560,6 +561,7 @@
if upscale is not None or facetool_strength > 0:
self.upscale_and_reconstruct(results,
upscale = upscale,
upscale_denoise_str = upscale_denoise_str,
facetool = facetool,
strength = facetool_strength,
codeformer_fidelity = codeformer_fidelity,
@@ -633,6 +635,7 @@
facetool_strength = 0.0,
codeformer_fidelity = 0.75,
upscale = None,
upscale_denoise_str = 0.75,
out_direction = None,
outcrop = [],
save_original = True, # to get new name
@@ -684,6 +687,7 @@
codeformer_fidelity = codeformer_fidelity,
save_original = save_original,
upscale = upscale,
upscale_denoise_str = upscale_denoise_str,
image_callback = callback,
prefix = prefix,
)
@@ -952,6 +956,7 @@
image_list,
facetool = 'gfpgan',
upscale = None,
upscale_denoise_str = 0.75,
strength = 0.0,
codeformer_fidelity = 0.75,
save_original = False,
@@ -982,7 +987,7 @@
if len(upscale) < 2:
upscale.append(0.75)
image = self.esrgan.process(
image, upscale[1], seed, int(upscale[0]))
image, upscale[1], seed, int(upscale[0]), denoise_str=upscale_denoise_str)
else:
print(">> ESRGAN is disabled. Image not upscaled.")
except Exception as e:
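
A hedged usage sketch of the new knob (assuming a configured `Generate` instance named `gen`): `upscale` keeps its `[scale, strength]` shape while the denoise blend travels separately.

```python
# 0.0 leans entirely on the plain x4v3 model, 1.0 on the weight-denoising variant.
results = gen.prompt2image(
    "banana sushi",
    upscale=[2, 0.75],        # [scale, strength], as before
    upscale_denoise_str=0.5,  # new in this commit
)
```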


@@ -58,12 +58,9 @@ def main():
print(f'>> Internet connectivity is {Globals.internet_available}')
if not args.conf:
if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
report_model_error(opt, e)
# print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
# print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
# print('** This script will now exit.')
# sys.exit(-1)
config_file = os.path.join(Globals.root,'configs','models.yaml')
if not os.path.exists(config_file):
report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))
print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -658,7 +655,9 @@ def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Opt
model_description=default_description
)
config_file = None
default = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
default = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') \
if re.search('inpaint',default_name, flags=re.IGNORECASE) \
else Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
completer.complete_extensions(('.yaml','.yml'))
completer.set_line(str(default))
@@ -709,12 +708,21 @@ def _get_model_name_and_desc(model_manager,completer,model_name:str='',model_des
model_description = input(f'Description for this model [{model_description}]: ').strip() or model_description
return model_name, model_description
def optimize_model(model_name_or_path:str, gen, opt, completer):
def _is_inpainting(model_name_or_path: str)->bool:
if re.search('inpaint',model_name_or_path, flags=re.IGNORECASE):
return not input('Is this an inpainting model? [y] ').startswith(('n','N'))
else:
return not input('Is this an inpainting model? [n] ').startswith(('y','Y'))
def optimize_model(model_name_or_path: str, gen, opt, completer):
manager = gen.model_manager
ckpt_path = None
original_config_file = None
if (model_info := manager.model_info(model_name_or_path)):
if model_name_or_path == gen.model_name:
print("** Can't convert the active model. !switch to another model first. **")
return
elif (model_info := manager.model_info(model_name_or_path)):
if 'weights' in model_info:
ckpt_path = Path(model_info['weights'])
original_config_file = Path(model_info['config'])
@@ -731,7 +739,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
ckpt_path.stem,
f'Converted model {ckpt_path.stem}'
)
is_inpainting = input('Is this an inpainting model? [n] ').startswith(('y','Y'))
is_inpainting = _is_inpainting(model_name_or_path)
original_config_file = Path(
'configs',
'stable-diffusion',
@@ -889,6 +897,7 @@ def do_postprocess (gen, opt, callback):
codeformer_fidelity = opt.codeformer_fidelity,
save_original = opt.save_original,
upscale = opt.upscale,
upscale_denoise_str = opt.esrgan_denoise_str,
out_direction = opt.out_direction,
outcrop = opt.outcrop,
callback = callback,
@@ -950,7 +959,7 @@ def prepare_image_metadata(
print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
filename = f'{prefix}.{seed}.png'
except IndexError:
print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
print("** The filename format is broken or complete. Will use '{prefix}.{seed}.png' instead")
filename = f'{prefix}.{seed}.png'
if opt.variation_amount > 0:


@@ -1 +1 @@
__version__='2.3.0-rc7'
__version__='2.3.0'


@@ -671,6 +671,12 @@ class Args(object):
default=400,
help='Tile size for background sampler, 0 for no tile during testing. Default: 400.',
)
postprocessing_group.add_argument(
'--esrgan_denoise_str',
type=float,
default=0.75,
help='esrgan denoise str. 0 is no denoise, 1 is max denoise. Default: 0.75',
)
postprocessing_group.add_argument(
'--gfpgan_model_path',
type=str,


@@ -128,7 +128,7 @@ script do it for you. Manual installation is described at:
https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/
You may download the recommended models (about 15GB total), install all models (40 GB!!)
You may download the recommended models (about 15GB total), install all models (40 GB!!)
select a customized set, or completely skip this step.
"""
)
@@ -583,7 +583,7 @@ def new_config_file_contents(successfully_downloaded: dict, config_file: Path, o
# model is a diffusers (indicated with a path)
if conf.get(model) and Path(successfully_downloaded[model]).is_dir():
offer_to_delete_weights(model, conf[model], opt.yes_to_all)
stanza = {}
mod = Datasets[model]
stanza["description"] = mod["description"]
@@ -635,7 +635,7 @@ def offer_to_delete_weights(model_name: str, conf_stanza: dict, yes_to_all: bool
weights.unlink()
except OSError as e:
print(str(e))
# ---------------------------------------------
# this will preload the Bert tokenizer files
def download_bert():
@@ -683,10 +683,18 @@ def download_clip():
def download_realesrgan():
print("Installing models from RealESRGAN...", file=sys.stderr)
model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth"
wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"
model_dest = os.path.join(
Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
)
wdn_model_dest = os.path.join(
Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
)
download_with_progress_bar(model_url, model_dest, "RealESRGAN")
download_with_progress_bar(wdn_model_url, wdn_model_dest, "RealESRGANwdn")
def download_gfpgan():


@@ -16,7 +16,7 @@ class ESRGAN():
else:
use_half_precision = True
def load_esrgan_bg_upsampler(self):
def load_esrgan_bg_upsampler(self, denoise_str):
if not torch.cuda.is_available(): # CPU or MPS on M1
use_half_precision = False
else:
@@ -26,14 +26,16 @@ class ESRGAN():
from realesrgan import RealESRGANer
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
model_path = os.path.join(Globals.root,'models/realesrgan/realesr-general-x4v3.pth')
model_path = os.path.join(Globals.root, 'models/realesrgan/realesr-general-x4v3.pth')
wdn_model_path = os.path.join(Globals.root, 'models/realesrgan/realesr-general-wdn-x4v3.pth')
scale = 4
bg_upsampler = RealESRGANer(
scale=scale,
model_path=model_path,
model_path=[model_path, wdn_model_path],
model=model,
tile=self.bg_tile_size,
dni_weight=[denoise_str, 1 - denoise_str],
tile_pad=10,
pre_pad=0,
half=use_half_precision,
@@ -41,13 +43,13 @@ class ESRGAN():
return bg_upsampler
def process(self, image: ImageType, strength: float, seed: str = None, upsampler_scale: int = 2):
def process(self, image: ImageType, strength: float, seed: str = None, upsampler_scale: int = 2, denoise_str: float = 0.75):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=UserWarning)
try:
upsampler = self.load_esrgan_bg_upsampler()
upsampler = self.load_esrgan_bg_upsampler(denoise_str)
except Exception:
import traceback
import sys
@@ -60,7 +62,7 @@ class ESRGAN():
if seed is not None:
print(
f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
f'>> Real-ESRGAN Upscaling seed:{seed}, scale:{upsampler_scale}x, tile:{self.bg_tile_size}, denoise:{denoise_str}'
)
# ESRGAN outputs images with partial transparency if given RGBA images; convert to RGB
image = image.convert("RGB")
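
The `dni_weight` pair above drives deep network interpolation: the two checkpoints' weights are blended per parameter, so `denoise_str` slides smoothly between the plain and the weight-denoising x4v3 model. A minimal standalone sketch (the checkpoint paths are assumptions):

```python
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from realesrgan import RealESRGANer

denoise_str = 0.5  # 0 = plain model only, 1 = denoising model only

model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64,
                        num_conv=32, upscale=4, act_type='prelu')
upsampler = RealESRGANer(
    scale=4,
    # Two checkpoints whose weights get linearly blended (DNI).
    model_path=['realesr-general-x4v3.pth', 'realesr-general-wdn-x4v3.pth'],
    dni_weight=[denoise_str, 1 - denoise_str],
    model=model,
    tile=400,      # matches the CLI's default background tile size
    tile_pad=10,
    pre_pad=0,
    half=False,
)
```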