Merge branch 'main' into feat/api/image-max-age

Commit 545e2f557f by Mary Hipp Rogers, 2023-07-14 08:21:44 -04:00 (committed by GitHub)
243 changed files with 6913 additions and 6059 deletions

.dockerignore

@@ -1,25 +1,9 @@
# use this file as a whitelist
*
!invokeai
!ldm
!pyproject.toml
!docker/docker-entrypoint.sh
!LICENSE
# ignore frontend/web but whitelist dist
invokeai/frontend/web/
!invokeai/frontend/web/dist/
# ignore invokeai/assets but whitelist invokeai/assets/web
invokeai/assets/
!invokeai/assets/web/
# Guard against pulling in any models that might exist in the directory tree
**/*.pt*
**/*.ckpt
# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]
# Distribution / packaging
**/*.egg-info/
**/*.egg
**/node_modules
**/__pycache__
**/*.egg-info

.github/workflows/build-container.yml

@@ -3,17 +3,15 @@ on:
push:
branches:
- 'main'
- 'update/ci/docker/*'
- 'update/docker/*'
- 'dev/ci/docker/*'
- 'dev/docker/*'
paths:
- 'pyproject.toml'
- '.dockerignore'
- 'invokeai/**'
- 'docker/Dockerfile'
- 'docker/docker-entrypoint.sh'
- 'workflows/build-container.yml'
tags:
- 'v*.*.*'
- 'v*'
workflow_dispatch:
permissions:
@@ -26,23 +24,27 @@ jobs:
strategy:
fail-fast: false
matrix:
flavor:
- rocm
- cuda
- cpu
include:
- flavor: rocm
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
- flavor: cuda
pip-extra-index-url: ''
- flavor: cpu
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
gpu-driver:
- cuda
- cpu
- rocm
runs-on: ubuntu-latest
name: ${{ matrix.flavor }}
name: ${{ matrix.gpu-driver }}
env:
PLATFORMS: 'linux/amd64,linux/arm64'
DOCKERFILE: 'docker/Dockerfile'
# torch/arm64 does not support GPU currently, so arm64 builds
# would not be GPU-accelerated.
# re-enable arm64 if there is sufficient demand.
# PLATFORMS: 'linux/amd64,linux/arm64'
PLATFORMS: 'linux/amd64'
steps:
- name: Free up more disk space on the runner
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
sudo swapoff /mnt/swapfile
sudo rm -rf /mnt/swapfile
- name: Checkout
uses: actions/checkout@v3
@@ -53,7 +55,7 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
images: |
ghcr.io/${{ github.repository }}
${{ vars.DOCKERHUB_REPOSITORY }}
${{ env.DOCKERHUB_REPOSITORY }}
tags: |
type=ref,event=branch
type=ref,event=tag
@@ -62,8 +64,8 @@ jobs:
type=pep440,pattern={{major}}
type=sha,enable=true,prefix=sha-,format=short
flavor: |
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.flavor }},onlatest=false
latest=${{ matrix.gpu-driver == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=-${{ matrix.gpu-driver }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
@@ -81,34 +83,33 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub
if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
# - name: Login to Docker Hub
# if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
# uses: docker/login-action@v2
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build container
id: docker_build
uses: docker/build-push-action@v4
with:
context: .
file: ${{ env.DOCKERFILE }}
file: docker/Dockerfile
platforms: ${{ env.PLATFORMS }}
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
cache-from: |
type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
type=gha,scope=main-${{ matrix.flavor }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
type=gha,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
type=gha,scope=main-${{ matrix.gpu-driver }}
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.gpu-driver }}
- name: Docker Hub Description
if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
repository: ${{ vars.DOCKERHUB_REPOSITORY }}
short-description: ${{ github.event.repository.description }}
# - name: Docker Hub Description
# if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
# uses: peter-evans/dockerhub-description@v3
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
# repository: ${{ vars.DOCKERHUB_REPOSITORY }}
# short-description: ${{ github.event.repository.description }}

LICENSE

@@ -1,21 +1,176 @@
MIT License
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright (c) 2022 InvokeAI Team
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
1. Definitions.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

README.md

@@ -3,8 +3,8 @@
![project hero](https://github.com/invoke-ai/InvokeAI/assets/31807370/1a917d94-e099-4fa1-a70f-7dd8d0691018)
# Invoke AI - Generative AI for Professional Creatives
## Image Generation for Stable Diffusion, Custom-Trained Models, and more.
Learn more about us and get started instantly at [invoke.ai](https://invoke.ai)
## Professional Creative Tools for Stable Diffusion, Custom-Trained Models, and more.
To learn more about Invoke AI, get started instantly, or implement our Business solutions, visit [invoke.ai](https://invoke.ai)
[![discord badge]][discord link]
@@ -329,24 +329,24 @@ InvokeAI offers a locally hosted Web Server & React Frontend, with an industry l
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
### *Advanced Prompt Syntax*
### *Node Architecture & Editor (Beta)*
Invoke AI's advanced prompt syntax allows for token weighting, cross-attention control, and prompt blending, allowing for fine-tuned tweaking of your invocations and exploration of the latent space.
Invoke AI's backend is built on a graph-based execution architecture. This allows for customizable generation pipelines to be developed by professional users looking to create specific workflows to support their production use-cases, and will be extended in the future with additional capabilities.
### *Command Line Interface*
### *Board & Gallery Management*
For users utilizing a terminal-based environment, or who want to take advantage of CLI features, InvokeAI offers an extensive and actively supported command-line interface that provides the full suite of generation functionality available in the tool.
Invoke AI provides an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any Image-based UI element in the application, and rich metadata within the Image allows for easy recall of key prompts or settings used in your workflow.
### Other features
- *Support for both ckpt and diffusers models*
- *SD 2.0, 2.1 support*
- *Upscaling & Face Restoration Tools*
- *Upscaling Tools*
- *Embedding Manager & Support*
- *Model Manager & Support*
- *Node-Based Architecture*
- *Node-Based Plug-&-Play UI (Beta)*
- *Boards & Gallery Management*
- *SDXL Support* (Coming soon)
### Latest Changes
@@ -359,7 +359,7 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
problems and other issues.
## 🤝 Contributing
## Contributing
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
cleanup, testing, or code reviews, is very much encouraged to do so.
@@ -378,7 +378,7 @@ to become part of our community.
Welcome to InvokeAI!
### 👥 Contributors
### Contributors
This fork is a combined effort of various people from across the world.
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for

docker/.env.sample (new file)

@@ -0,0 +1,13 @@
## Make a copy of this file named `.env` and fill in the values below.
## Any environment variables supported by InvokeAI can be specified here.
# INVOKEAI_ROOT is the path to a location on the local filesystem where InvokeAI will store data.
# Outputs will also be stored here by default.
# This **must** be an absolute path.
INVOKEAI_ROOT=
HUGGINGFACE_TOKEN=
## optional variables specific to the docker setup
# GPU_DRIVER=cuda
# CONTAINER_UID=1000

docker/Dockerfile

@@ -1,107 +1,129 @@
# syntax=docker/dockerfile:1
# syntax=docker/dockerfile:1.4
ARG PYTHON_VERSION=3.9
##################
## base image ##
##################
FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base
## Builder stage
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
FROM library/ubuntu:22.04 AS builder
# Prepare apt for buildkit cache
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt-get install -y \
git \
python3.10-venv \
python3-pip \
build-essential
# Install dependencies
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
libopencv-dev=4.5.*
ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
# Set working directory and env
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
# Keeps Python from generating .pyc files in the container
ENV PYTHONDONTWRITEBYTECODE 1
# Turns off buffering for easier container logging
ENV PYTHONUNBUFFERED 1
# Don't fall back to legacy build system
ENV PIP_USE_PEP517=1
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ARG TORCH_VERSION=2.0.1
ARG TORCHVISION_VERSION=0.15.2
ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
ARG BUILDPLATFORM
#######################
## build pyproject ##
#######################
FROM python-base AS pyproject-builder
WORKDIR ${INVOKEAI_SRC}
# Install build dependencies
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential=12.9 \
gcc=4:10.2.* \
python3-dev=3.9.*
# Install pytorch before all other pip packages
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is default
RUN --mount=type=cache,target=/root/.cache/pip \
python3 -m venv ${VIRTUAL_ENV} &&\
if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
elif [ "$GPU_DRIVER" = "rocm" ]; then \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
else \
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \
fi &&\
pip install $extra_index_url_arg \
torch==$TORCH_VERSION \
torchvision==$TORCHVISION_VERSION
# Prepare pip for buildkit cache
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
RUN mkdir -p ${PIP_CACHE_DIR}
# Install the local package.
# Editable mode helps use the same image for development:
# the local working copy can be bind-mounted into the image
# at path defined by ${INVOKEAI_SRC}
COPY invokeai ./invokeai
COPY pyproject.toml ./
RUN --mount=type=cache,target=/root/.cache/pip \
# xformers + triton fails to install on arm64
if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
pip install -e ".[xformers]"; \
else \
pip install -e "."; \
fi
# Create virtual environment
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
python3 -m venv "${APPNAME}" \
--upgrade-deps
# #### Build the Web UI ------------------------------------
# Install requirements
COPY --link pyproject.toml .
COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
"${APPNAME}"/bin/pip install .
FROM node:18 AS web-builder
WORKDIR /build
COPY invokeai/frontend/web/ ./
RUN --mount=type=cache,target=/usr/lib/node_modules \
npm install --include dev
RUN --mount=type=cache,target=/usr/lib/node_modules \
yarn vite build
# Install pyproject.toml
COPY --link . .
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
"${APPNAME}/bin/pip" install .
# Build patchmatch
#### Runtime stage ---------------------------------------
FROM library/ubuntu:22.04 AS runtime
ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
RUN apt update && apt install -y --no-install-recommends \
git \
curl \
vim \
tmux \
ncdu \
iotop \
bzip2 \
gosu \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
libopencv-dev \
libstdc++-10-dev &&\
apt-get clean && apt-get autoclean
# globally add magic-wormhole
# for ease of transferring data to and from the container
# when running in sandboxed cloud environments; e.g. Runpod etc.
RUN pip install magic-wormhole
ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
ENV INVOKEAI_ROOT=/invokeai
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
# --link requires buildkit w/ dockerfile syntax 1.4
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist
# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"
WORKDIR ${INVOKEAI_SRC}
# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python3 -c "from patchmatch import patch_match"
#####################
## runtime image ##
#####################
FROM python-base AS runtime
# Create unprivileged user and make the local dir
RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke
RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT}
# Create a new user
ARG UNAME=appuser
RUN useradd \
--no-log-init \
-m \
-U \
"${UNAME}"
# Create volume directory
ARG VOLUME_DIR=/data
RUN mkdir -p "${VOLUME_DIR}" \
&& chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"
# Setup runtime environment
USER ${UNAME}:${UNAME}
COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
ENV INVOKEAI_ROOT ${VOLUME_DIR}
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
EXPOSE 9090
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
VOLUME [ "${VOLUME_DIR}" ]
COPY docker/docker-entrypoint.sh ./
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
CMD ["invokeai-web", "--host", "0.0.0.0"]

docker/README.md (new file)

@@ -0,0 +1,77 @@
# InvokeAI Containerized
All commands are to be run from the `docker` directory: `cd docker`
#### Linux
1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`)
2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-compose-on-ubuntu-22-04).
- The deprecated `docker-compose` (hyphenated) CLI continues to work for now.
3. Ensure the Docker daemon is able to access the GPU.
- You may need to install [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
#### macOS
1. Ensure Docker has at least 16GB RAM
2. Enable VirtioFS for file sharing
3. Enable `docker compose` V2 support
This is done via Docker Desktop preferences
## Quickstart
1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy .env.sample .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
a. the desired location of the InvokeAI runtime directory, or
b. an existing, v3.0.0 compatible runtime directory.
1. `docker compose up`
The image will be built automatically if needed.
The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. The runtime directory will be populated with the base configs and models necessary to start generating.
### Use a GPU
- Linux is *recommended* for GPU support in Docker.
- WSL2 is *required* for Windows.
- Only the `x86_64` architecture is supported.
The Docker daemon on the system must already be set up to use the GPU. On Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as the default. Steps will be different for AMD GPUs. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.
## Customize
Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `docker compose up`, your custom values will be used.
You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.
Example (most values are optional):
```
INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
HUGGINGFACE_TOKEN=the_actual_token
CONTAINER_UID=1000
GPU_DRIVER=cuda
```
## Even Moar Customizing!
See the `docker-compose.yml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below.
### Reconfigure the runtime directory
This can be used to download additional models from the supported model list.
In conjunction with `INVOKEAI_ROOT`, it can also be used to initialize a runtime directory.
```
command:
- invokeai-configure
- --yes
```
Or install models:
```
command:
- invokeai-model-install
```

docker/build.sh

@@ -1,51 +1,11 @@
#!/usr/bin/env bash
set -e
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
# Possible Values are:
# - cpu
# - cuda
# - rocm
# Don't forget to also set it when executing run.sh
# if it is not set, the script will try to detect the flavor by itself.
#
# Doc can be found here:
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
build_args=""
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1
[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env)
source ./env.sh
echo "docker-compose build args:"
echo $build_args
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t\t${DOCKERFILE}"
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t\t${VOLUMENAME}"
echo -e "Platform:\t\t${PLATFORM}"
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
echo -n "creating docker volume "
docker volume create "${VOLUMENAME}"
fi
# Build Container
docker build \
--platform="${PLATFORM:-linux/amd64}" \
--tag="${CONTAINER_IMAGE:-invokeai}" \
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
--file="${DOCKERFILE}" \
..
docker-compose build $build_args

docker/docker-compose.yml (new file)

@@ -0,0 +1,48 @@
# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr
version: '3.8'
services:
invokeai:
image: "local/invokeai:latest"
# edit below to run on a container runtime other than nvidia-container-runtime.
# not yet tested with rocm/AMD GPUs
# Comment out the "deploy" section to run on CPU only
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
build:
context: ..
dockerfile: docker/Dockerfile
# variables without a default will automatically inherit from the host environment
environment:
- INVOKEAI_ROOT
- HF_HOME
# Create a .env file in the same directory as this docker-compose.yml file
# and populate it with environment variables. See .env.sample
env_file:
- .env
ports:
- "${INVOKEAI_PORT:-9090}:9090"
volumes:
- ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
- ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
# - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
# - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
tty: true
stdin_open: true
# # Example of running alternative commands/scripts in the container
# command:
# - bash
# - -c
# - |
# invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
# invokeai-nodes-web --host 0.0.0.0

docker/docker-entrypoint.sh (new executable file)

@@ -0,0 +1,65 @@
#!/bin/bash
set -e -o pipefail
### Container entrypoint
# Runs the CMD as defined by the Dockerfile or passed to `docker run`
# Can be used to configure the runtime dir
# Bypass by using ENTRYPOINT or `--entrypoint`
### Set INVOKEAI_ROOT pointing to a valid runtime directory
# Otherwise configure the runtime dir first.
### Configure the InvokeAI runtime directory (done by default):
# docker run --rm -it <this image> --configure
# or skip with --no-configure
### Set the CONTAINER_UID envvar to match your user.
# Ensures files created in the container are owned by you:
# docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
# Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.
USER_ID=${CONTAINER_UID:-1000}
USER=invoke
usermod -u ${USER_ID} ${USER} 1>/dev/null
configure() {
# Configure the runtime directory
if [[ -f ${INVOKEAI_ROOT}/invokeai.yaml ]]; then
echo "${INVOKEAI_ROOT}/invokeai.yaml exists. InvokeAI is already configured."
echo "To reconfigure InvokeAI, delete the above file."
echo "======================================================================"
else
mkdir -p ${INVOKEAI_ROOT}
chown --recursive ${USER} ${INVOKEAI_ROOT}
gosu ${USER} invokeai-configure --yes --default_only
fi
}
## Skip attempting to configure.
## Must be passed first, before any other args.
if [[ $1 != "--no-configure" ]]; then
configure
else
shift
fi
### Set the $PUBLIC_KEY env var to enable SSH access.
# We do not install openssh-server in the image by default to avoid bloat.
# but it is useful to have the full SSH server e.g. on Runpod.
# (use SCP to copy files to/from the image, etc)
if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
apt-get update
apt-get install -y openssh-server
pushd $HOME
mkdir -p .ssh
echo ${PUBLIC_KEY} > .ssh/authorized_keys
chmod -R 700 .ssh
popd
service ssh start
fi
cd ${INVOKEAI_ROOT}
# Run the CMD as the Container User (not root).
exec gosu ${USER} "$@"

docker/env.sh (deleted)

@@ -1,54 +0,0 @@
#!/usr/bin/env bash
# This file is used to set environment variables for the build.sh and run.sh scripts.
# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
# Activate virtual environment if not already activated and exists
if [[ -z $VIRTUAL_ENV ]]; then
[[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
&& source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
&& echo "Activated virtual environment: $VIRTUAL_ENV"
fi
# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="cuda"
elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
CONTAINER_FLAVOR="cpu"
fi
fi
# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
# elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
# PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
fi
fi
# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
# enable docker buildkit
export DOCKER_BUILDKIT=1

docker/run.sh

@@ -1,41 +1,8 @@
#!/usr/bin/env bash
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
# Create outputs directory if it does not exist
[[ -d ./outputs ]] || mkdir ./outputs
echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
echo -e "local Models:\t${MODELSPATH:-unset}\n"
docker run \
--interactive \
--tty \
--rm \
--platform="${PLATFORM}" \
--name="${REPOSITORY_NAME}" \
--hostname="${REPOSITORY_NAME}" \
--mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
--mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${@:+$@}
echo -e "\nCleaning trash folder ..."
for f in outputs/.Trash*; do
if [ -e "$f" ]; then
rm -Rf "$f"
break
fi
done
docker-compose up --build -d
docker-compose logs -f

docker/runpod-readme.md (new file)

@@ -0,0 +1,60 @@
# InvokeAI - A Stable Diffusion Toolkit
Stable Diffusion distribution by InvokeAI: https://github.com/invoke-ai
The Docker image tracks the `main` branch of the InvokeAI project, which means it includes the latest features, but may contain some bugs.
Your working directory is mounted under the `/workspace` path inside the pod. The models are in `/workspace/invokeai/models`, and outputs are in `/workspace/invokeai/outputs`.
> **Only the /workspace directory will persist between pod restarts!**
> **If you _terminate_ (not just _stop_) the pod, the /workspace will be lost.**
## Quickstart
1. Launch a pod from this template. **It will take about 5-10 minutes to run through the initial setup**. Be patient.
1. Wait for the application to load.
- TIP: you know it's ready when the CPU usage goes idle
- You can also check the logs for a line that says "_Point your browser at..._"
1. Open the Invoke AI web UI: click the `Connect` => `connect over HTTP` button.
1. Generate some art!
## Other things you can do
At any point you may edit the pod configuration and set an arbitrary Docker command. For example, you could run a command to download some models using `curl`, or fetch some images and place them into your outputs to continue a working session.
If you need to run *multiple commands*, define them in the Docker Command field like this:
`bash -c "cd ${INVOKEAI_ROOT}/outputs; wormhole receive 2-foo-bar; invoke.py --web --host 0.0.0.0"`
### Copying your data in and out of the pod
This image includes a couple of handy tools to help you get the data into the pod (such as your custom models or embeddings), and out of the pod (such as downloading your outputs). Here are your options for getting your data in and out of the pod:
- **SSH server**:
1. Make sure to create and set your Public Key in the RunPod settings (follow the official instructions)
1. Add an exposed port 22 (TCP) in the pod settings!
1. When your pod restarts, you will see a new entry in the `Connect` dialog. Use this SSH server to `scp` or `sftp` your files as necessary, or SSH into the pod using the fully fledged SSH server.
- [**Magic Wormhole**](https://magic-wormhole.readthedocs.io/en/latest/welcome.html):
1. On your computer, `pip install magic-wormhole` (see above instructions for details)
1. Connect to the command line **using the "light" SSH client** or the browser-based console. _Currently there's a bug where `wormhole` isn't available when connected to "full" SSH server, as described above_.
1. `wormhole send /workspace/invokeai/outputs` will send the entire `outputs` directory. You can also send individual files.
1. Once packaged, you will see a `wormhole receive <123-some-words>` command. Copy it
1. Paste this command into the terminal on your local machine to securely download the payload.
1. It works the same in reverse: you can `wormhole send` some models from your computer to the pod. Again, save your files somewhere in `/workspace` or they will be lost when the pod is stopped.
- **RunPod's Cloud Sync feature** may be used to sync the persistent volume to cloud storage. You could, for example, copy the entire `/workspace` to S3, add some custom models to it, and copy it back from S3 when launching new pod configurations. Follow the Cloud Sync instructions.
### Disable the NSFW checker
The NSFW checker is enabled by default. To disable it, edit the pod configuration and set the following command:
```
invoke --web --host 0.0.0.0 --no-nsfw_checker
```
---
Template ©2023 Eugene Brodsky [ebr](https://github.com/ebr)

Two binary image files added (not shown): 7.1 KiB and 17 KiB.

docs/contributing/INVOCATIONS.md

@@ -1,8 +1,521 @@
# Invocations
Invocations represent a single operation, its inputs, and its outputs. These
operations and their outputs can be chained together to generate and modify
images.
Features in InvokeAI are added in the form of modular node-like systems called
**Invocations**.
An Invocation is simply a single operation that takes in some inputs and gives
out some outputs. We can then chain multiple Invocations together to create more
complex functionality.
## Invocations Directory
InvokeAI Invocations can be found in the `invokeai/app/invocations` directory.
You can add your new functionality to one of the existing Invocations in this
directory or create a new file in this directory as per your needs.
**Note:** _All Invocations must be inside this directory for InvokeAI to
recognize them as valid Invocations._
## Creating A New Invocation
In order to understand the process of creating a new Invocation, let us actually
create one.
In our example, let us create an Invocation that will take in an image, resize
it and output the resized image.
The first things we need to do when creating a new Invocation are:
- Create a new class that derives from a predefined parent class called
`BaseInvocation`.
- The name of every Invocation must end with the word `Invocation` in order for
it to be recognized as an Invocation.
- Every Invocation must have a `docstring` that describes what this Invocation
does.
- Every Invocation must have a unique `type` field defined which becomes its
identifier.
- Invocations are strictly typed. We make use of the native
[typing](https://docs.python.org/3/library/typing.html) library and the
installed [pydantic](https://pydantic-docs.helpmanual.io/) library for
validation.
So let us do that.
```python
from typing import Literal

from .baseinvocation import BaseInvocation


class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''

    type: Literal['resize'] = 'resize'
```
That's great.
Now we have set up the base of our new Invocation. Let us think about what inputs
our Invocation takes.
- We need an `image` that we are going to resize.
- We will need new `width` and `height` values to which to resize the image.
### **Inputs**
Every Invocation input is a pydantic `Field` and like everything else should be
strictly typed and defined.
So let us create these inputs for our Invocation. First up, the `image` input we
need. Generally, we can use standard variable types in Python but InvokeAI
already has a custom `ImageField` type that handles all the stuff that is needed
for image inputs.
But what is this `ImageField`? It is a special class type specifically
written to handle how images are dealt with in InvokeAI. We will cover how to
create your own custom field types later in this guide. For now, let's go ahead
and use it.
```python
from typing import Literal, Union

from pydantic import Field

from .baseinvocation import BaseInvocation
from ..models.image import ImageField


class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''

    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
```
Let us break down our input code.
```python
image: Union[ImageField, None] = Field(description="The input image", default=None)
```
| Part | Value | Description |
| --------- | ---------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
| Name | `image` | The variable that will hold our image |
| Type Hint | `Union[ImageField, None]` | The types for our field. Indicates that the image can either be an `ImageField` type or `None` |
| Field | `Field(description="The input image", default=None)` | The image variable is a field which needs a description and a default value that we set to `None`. |
Great. Now let us create our other inputs for `width` and `height`
```python
from typing import Literal, Union

from pydantic import Field

from .baseinvocation import BaseInvocation
from ..models.image import ImageField


class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''

    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")
```
As you might have noticed, we added two new parameters to the fields for
`width` and `height` called `ge` and `le`. These stand for _greater than or
equal to_ and _less than or equal to_. There are various other parameters for
fields that you can find in the **pydantic** documentation.
**Note:** _Any time it is possible to define constraints for our field, we
should do it so the frontend has more information on how to parse this field._
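For instance, here is a minimal sketch of a couple of other constraints pydantic offers; the model and field names below are hypothetical and only meant to illustrate the idea:

```python
from pydantic import BaseModel, Field


class CropArgs(BaseModel):
    '''Hypothetical model showing a few more pydantic constraints'''

    # multiple_of keeps the dimension aligned to an 8-pixel grid
    width: int = Field(default=512, ge=64, le=2048, multiple_of=8, description="Width of the crop")
    # max_length bounds a free-text field
    label: str = Field(default="", max_length=64, description="Optional label for the crop")
```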
Perfect. We now have our inputs. Let us do something with these.
### **Invoke Function**
The `invoke` function is where all the magic happens. This function provides you
with a `context` parameter of type `InvocationContext`, which gives you access
to the current context of the generation and all the other services provided by
InvokeAI.
Let us create this function first.
```python
from typing import Literal, Union

from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext
from ..models.image import ImageField


class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''

    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")

    def invoke(self, context: InvocationContext):
        pass
```
### **Outputs**
The output of our Invocation will be whatever is returned by this `invoke`
function. Like with our inputs, we need to strongly type and define our outputs
too.
What is our output going to be? Another image. Normally you'd have to create a
type for this but InvokeAI already offers you an `ImageOutput` type that handles
all the necessary info related to image outputs. So let us use that.
We will cover how to create your own output types later in this guide.
```python
from typing import Literal, Union

from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext
from ..models.image import ImageField
from .image import ImageOutput


class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''

    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        pass
```
Perfect. Now that we have our Invocation setup, let us do what we want to do.
- We will first load the image. Generally we do this using the `PIL` library but
we can use one of the services provided by InvokeAI to load the image.
- We will resize the image using `PIL` to our input data.
- We will output this image in the format we set above.
So let's do that.
```python
from typing import Literal, Union

from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext
from ..models.image import ImageField, ResourceOrigin, ImageCategory
from .image import ImageOutput


class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''

    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the image using InvokeAI's predefined Image Service.
        image = context.services.images.get_pil_image(self.image.image_origin, self.image.image_name)

        # Resizing the image
        # Because we used the above service, we already have a PIL image. So we can simply resize.
        resized_image = image.resize((self.width, self.height))

        # Preparing the image for output using InvokeAI's predefined Image Service.
        output_image = context.services.images.create(
            image=resized_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        # Returning the Image
        return ImageOutput(
            image=ImageField(
                image_name=output_image.image_name,
                image_origin=output_image.image_origin,
            ),
            width=output_image.width,
            height=output_image.height,
        )
```
**Note:** Do not be overwhelmed by the `ImageOutput` process. InvokeAI has a
certain way that the images need to be dispatched in order to be stored and read
correctly. In 99% of the cases when dealing with an image output, you can simply
copy-paste the template above.
That's it. You made your own **Resize Invocation**.
## Result
Once you make your Invocation correctly, the rest of the process is fully
automated for you.
When you launch InvokeAI, you can go to `http://localhost:9090/docs` and see
your new Invocation show up there with all the relevant info.
![resize invocation](../assets/contributing/resize_invocation.png)
When you launch the frontend UI, you can go to the Node Editor tab and find your
new Invocation ready to be used.
![resize node editor](../assets/contributing/resize_node_editor.png)
# Advanced
## Custom Input Fields
Now that you know how to create your own Invocations, let us dive into slightly
more advanced topics.
While creating your own Invocations, you might run into a scenario where the
existing input types in InvokeAI do not meet your requirements. In such cases,
you can create your own input types.
Let us create one as an example. Let us say we want to create a color input
field that represents a color code. But before we start on that here are some
general good practices to keep in mind.
**Good Practices**
- There is no naming convention for input fields but we highly recommend that
you name it something appropriate like `ColorField`.
- It is not mandatory but it is heavily recommended to add a relevant
`docstring` to describe your input field.
- Keep your field in the same file as the Invocation that it is made for or in
another file where it is relevant.
All input types are classes that derive from pydantic's `BaseModel` type.
So let's create one.
```python
from pydantic import BaseModel


class ColorField(BaseModel):
    '''A field that holds the rgba values of a color'''

    pass
```
Perfect. Now let us create the fields for our custom type. This works exactly
the same way as creating input fields for an Invocation, and all the same rules
apply. Let us create four fields representing the _red (r)_, _green (g)_,
_blue (b)_ and _alpha (a)_ channels of the color.
```python
class ColorField(BaseModel):
    '''A field that holds the rgba values of a color'''

    r: int = Field(ge=0, le=255, description="The red channel")
    g: int = Field(ge=0, le=255, description="The green channel")
    b: int = Field(ge=0, le=255, description="The blue channel")
    a: int = Field(ge=0, le=255, description="The alpha channel")
```
That's it. We now have a new input field type that we can use in our Invocations
like this.
```python
color: ColorField = Field(default=ColorField(r=0, g=0, b=0, a=0), description='Background color of an image')
```
**Extra Config**
All input fields also take an additional `Config` class that you can use to do
various advanced things like setting required parameters.
Let us do that for our _ColorField_ and enforce all the values because we did
not define any defaults for our fields.
```python
class ColorField(BaseModel):
    '''A field that holds the rgba values of a color'''

    r: int = Field(ge=0, le=255, description="The red channel")
    g: int = Field(ge=0, le=255, description="The green channel")
    b: int = Field(ge=0, le=255, description="The blue channel")
    a: int = Field(ge=0, le=255, description="The alpha channel")

    class Config:
        schema_extra = {"required": ["r", "g", "b", "a"]}
```
Now it becomes mandatory for the user to supply all the values required by our
input field.
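As a quick sanity check, here is a minimal sketch of what this looks like at runtime. It assumes the `ColorField` class defined above is in scope and relies on plain pydantic behavior:

```python
from pydantic import ValidationError

# None of the channels define a default, so pydantic raises a ValidationError
# when one is omitted; the "required" entry in the JSON schema communicates
# the same constraint to the frontend.
try:
    ColorField(r=255, g=0, b=0)  # alpha channel missing
except ValidationError as err:
    print(err)  # reports that field "a" is required
```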
We will discuss the `Config` class in extra detail later in this guide and how
you can use it to make your Invocations more robust.
## Custom Output Types
Like with custom inputs, sometimes you might find yourself needing custom
outputs that InvokeAI does not provide. We can easily set one up.
Now that you are familiar with Invocations and Inputs, let us use that knowledge
to put together a custom output type for an Invocation that returns _width_,
_height_ and _background_color_ that we need to create a blank image.
- A custom output type is a class that derives from the parent class of
`BaseInvocationOutput`.
- It is not mandatory but we recommend using names ending with `Output` for
output types. So we'll call our class `BlankImageOutput`
- It is not mandatory but we highly recommend adding a `docstring` to describe
what your output type is for.
- Like Invocations, each output type should have a `type` variable that is
**unique**
Now that we know the basic rules for creating a new output type, let us go ahead
and make it.
```python
from typing import Literal

from pydantic import Field

from .baseinvocation import BaseInvocationOutput


class BlankImageOutput(BaseInvocationOutput):
    '''Base output type for creating a blank image'''

    type: Literal['blank_image_output'] = 'blank_image_output'

    # Inputs
    width: int = Field(description='Width of blank image')
    height: int = Field(description='Height of blank image')
    bg_color: ColorField = Field(description='Background color of blank image')

    class Config:
        schema_extra = {"required": ["type", "width", "height", "bg_color"]}
```
All set. We now have an output type that carries everything we need to create a
blank image. And if you noticed, we even used the `Config` class to ensure
the fields are required.
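To see the new output type in action, here is a hedged sketch of a hypothetical Invocation that returns it. It assumes `ColorField` and `BlankImageOutput` are defined in the same file as shown above; the class name and `type` string are made up for illustration:

```python
from typing import Literal

from pydantic import Field

from .baseinvocation import BaseInvocation, InvocationContext


class BlankImageParamsInvocation(BaseInvocation):
    '''Hypothetical Invocation that emits the parameters for a blank image'''

    type: Literal['blank_image_params'] = 'blank_image_params'

    # Inputs
    width: int = Field(default=512, ge=64, le=2048, description="Width of the blank image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the blank image")
    bg_color: ColorField = Field(default=ColorField(r=0, g=0, b=0, a=255), description="Background color")

    def invoke(self, context: InvocationContext) -> BlankImageOutput:
        # No image is created here; we only package the inputs into our custom
        # output type so a downstream node can consume them.
        return BlankImageOutput(width=self.width, height=self.height, bg_color=self.bg_color)
```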
## Custom Configuration
As you might have noticed when making inputs and outputs, we used a class called
`Config` from _pydantic_ to further customize them. Because our inputs and
outputs essentially inherit from _pydantic_'s `BaseModel` class, all
[configuration options](https://docs.pydantic.dev/latest/usage/schema/#schema-customization)
that are valid for _pydantic_ classes are also valid for our inputs and outputs.
You can do the same for your Invocations too but InvokeAI makes our life a
little bit easier on that end.
InvokeAI provides a custom configuration class called `InvocationConfig`
particularly for configuring Invocations. This is exactly the same as the raw
`Config` class from _pydantic_ with some extra stuff on top to help facilitate
parsing of the schema in the frontend UI.
At the moment, this `InvocationConfig` class is further improved with
the following features related to the `ui`.
| Config Option | Field Type | Example |
| ------------- | ------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- |
| type_hints | `Dict[str, Literal["integer", "float", "boolean", "string", "enum", "image", "latents", "model", "control"]]` | `type_hints: {"model": "model"}` provides type hints for a field, such as displaying a list of available models for a `model` field. |
| tags | `List[str]` | `tags: ['resize', 'image']` will classify your Invocation under the tags `resize` and `image`. |
| title | `str` | `title: 'Resize Image'` will give your Invocation this custom title rather than inferring one from the name of the Invocation class. |
So let us update your `ResizeInvocation` with some extra configuration and see
how that works.
```python
from typing import Literal, Union
from pydantic import Field
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from ..models.image import ImageField, ResourceOrigin, ImageCategory
from .image import ImageOutput
class ResizeInvocation(BaseInvocation):
    '''Resizes an image'''

    type: Literal['resize'] = 'resize'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    width: int = Field(default=512, ge=64, le=2048, description="Width of the new image")
    height: int = Field(default=512, ge=64, le=2048, description="Height of the new image")

    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["resize", "image"],
                "title": "My Custom Resize",
            },
        }

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the image using InvokeAI's predefined Image Service.
        image = context.services.images.get_pil_image(self.image.image_origin, self.image.image_name)

        # Resize the image.
        # Because we used the Image Service above, we already have a PIL image, so we can simply resize it.
        resized_image = image.resize((self.width, self.height))

        # Save the resized image using InvokeAI's predefined Image Service.
        output_image = context.services.images.create(
            image=resized_image,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )

        # Return the Image.
        return ImageOutput(
            image=ImageField(
                image_name=output_image.image_name,
                image_origin=output_image.image_origin,
            ),
            width=output_image.width,
            height=output_image.height,
        )
```
We have now customized our code to let the frontend know that our Invocation
falls under the `resize` and `image` tags. So when the user searches for either
of these words, our Invocation will show up too.
We also set a custom title for our Invocation. So instead of being called
`Resize`, it will be labeled `My Custom Resize`.
As simple as that.
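The `type_hints` option from the table above works the same way. Here is a rough
sketch of what it might look like, shown only as the nested `Config` fragment you
would add to an Invocation that has a (hypothetical) `model` field:
```python
class Config(InvocationConfig):
    schema_extra = {
        "ui": {
            "tags": ["example"],
            "title": "My Example Invocation",
            # Tells the frontend to render the (hypothetical) `model` field
            # as a dropdown of available models instead of a plain text box.
            "type_hints": {"model": "model"},
        },
    }
```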
As time goes by, InvokeAI will further improve and add more customizability for
Invocation configuration. We will have more documentation regarding this at a
later time.
# **[TODO]**
## Custom Components For Frontend
Every backend input type should have a corresponding frontend component so the
UI knows what to render when you use a particular field type.
If you are using existing field types, we already have components for those. So
you don't have to worry about creating anything new. But this might not always
be the case. Sometimes you might want to create new field types and have the
frontend UI deal with them in a different way.
This is where we venture into the world of React and JavaScript and create our
own new components for our Invocations. Do not fear the world of JS. It's
actually pretty straightforward.
Let us create a new component for the custom color field we created above. When
we use a color field, say we want the UI to display a color picker for the user
to choose a color from, rather than typing in values. That is what we will build
now.
---
# OLD -- TO BE DELETED OR MOVED LATER
---
## Creating a new invocation

View File

@ -1,9 +1,12 @@
---
title: Concepts Library
title: Concepts
---
# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
With the advances in research, many new capabilities are available to customize the knowledge and understanding of novel concepts not originally contained in the base model.
## Using Textual Inversion Files
Textual inversion (TI) files are small models that customize the output of
@ -12,18 +15,16 @@ and artistic styles. They are also known as "embeds" in the machine learning
world.
Each TI file introduces one or more vocabulary terms to the SD model. These are
known in InvokeAI as "triggers." Triggers are often, but not always, denoted
using angle brackets as in "&lt;trigger-phrase&gt;". The two most common type of
known in InvokeAI as "triggers." Triggers are denoted using angle brackets
as in "&lt;trigger-phrase&gt;". The two most common type of
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
different TI training packages. InvokeAI supports both formats, but its
[built-in TI training system](TEXTUAL_INVERSION.md) produces `.pt`.
[built-in TI training system](TRAINING.md) produces `.pt`.
The [Hugging Face company](https://huggingface.co/sd-concepts-library) has
amassed a large ligrary of &gt;800 community-contributed TI files covering a
broad range of subjects and styles. InvokeAI has built-in support for this
library which downloads and merges TI files automatically upon request. You can
also install your own or others' TI files by placing them in a designated
directory.
broad range of subjects and styles. You can also install your own or others' TI files
by placing them in the designated directory for the compatible model type
### An Example
@ -41,66 +42,43 @@ You can also combine styles and concepts:
| :--------------------------------------------------------: |
| ![](../assets/concepts/image5.png) |
</figure>
## Using a Hugging Face Concept
!!! warning "Authenticating to HuggingFace"
Some concepts require valid authentication to HuggingFace. Without it, they will not be downloaded
and will be silently ignored.
If you used an installer to install InvokeAI, you may have already set a HuggingFace token.
If you skipped this step, you can:
- run the InvokeAI configuration script again (if you used a manual installer): `invokeai-configure`
- set one of the `HUGGINGFACE_TOKEN` or `HUGGING_FACE_HUB_TOKEN` environment variables to contain your token
Finally, if you already used any HuggingFace library on your computer, you might already have a token
in your local cache. Check for a hidden `.huggingface` directory in your home folder. If it
contains a `token` file, then you are all set.
Hugging Face TI concepts are downloaded and installed automatically as you
require them. This requires your machine to be connected to the Internet. To
find out what each concept is for, you can browse the
[Hugging Face concepts library](https://huggingface.co/sd-concepts-library) and
look at examples of what each concept produces.
To load concepts, you will need to open the Web UI's configuration
dialogue and activate "Show Textual Inversions from HF Concepts
Library". This will then add a list of HF Concepts to the dropdown
"Add Textual Inversion" menu. Select the concept(s) of your choice and
they will be incorporated into the positive prompt. A few concepts are
designed for the negative prompt, in which case you can add them to
the negative prompt box by select the down arrow icon next to the
textual inversion menu.
There are nearly 1000 HF concepts, more than will fit into a menu. For
this reason we only show the most popular concepts (those which have
received 5 or more likes). If you wish to use a concept that is not on
the list, you may simply type its name surrounded by brackets. For
example, to load the concept named "xidiversity", add `<xidiversity>`
to the positive or negative prompt text.
## Installing your Own TI Files
You may install any number of `.pt` and `.bin` files simply by copying them into
the `embeddings` directory of the InvokeAI runtime directory (usually `invokeai`
in your home directory). You may create subdirectories in order to organize the
files in any way you wish. Be careful not to overwrite one file with another.
the `embedding` directory of the corresponding InvokeAI models directory (usually `invokeai`
in your home directory). For example, you can simply move a Stable Diffusion 1.5 embedding file to
the `sd-1/embedding` folder. Be careful not to overwrite one file with another.
For example, TI files generated by the Hugging Face toolkit share the named
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
`learned_embedding.bin`. You can rename these, or use subdirectories to keep them distinct.
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
files it finds there. At startup you will see a message similar to this one:
At startup time, InvokeAI will scan the various `embedding` directories and load any TI
files it finds there for compatible models. At startup you will see a message similar to this one:
```bash
>> Current embedding manager terms: <HOI4-Leader>, <princess-knight>
```
To use these when generating, simply type the `<` key in your prompt to open the Textual Inversion WebUI and
select the embedding you'd like to use. This UI has type-ahead support, so you can easily find supported embeddings.
The terms you can use will appear in the "Add Textual Inversion"
dropdown menu above the HF Concepts.
## Using LoRAs
## Further Reading
LoRA files are models that customize the output of Stable Diffusion image generation.
Larger than embeddings, but much smaller than full models, they augment SD with improved
understanding of subjects and artistic styles.
Unlike TI files, LoRAs do not introduce novel vocabulary into the model's known tokens. Instead,
LoRAs augment the model's weights that are applied to generate imagery. LoRAs may be supplied
with a "trigger" word that they have been explicitly trained on, or may simply apply their
effect without being triggered.
LoRAs are typically stored in .safetensors files, which are the most secure way to store and transmit
these types of weights. You may install any number of `.safetensors` LoRA files simply by copying them into
the `lora` directory of the corresponding InvokeAI models directory (usually `invokeai`
in your home directory). For example, you can simply move a Stable Diffusion 1.5 LoRA file to
the `sd-1/lora` folder.
To use these when generating, open the LoRA menu item in the options panel, select the LoRAs you want to apply
and ensure that they have the appropriate weight recommended by the model provider. Typically, most LoRAs perform best at a weight of .75-1.
Please see [the repository](https://github.com/rinongal/textual_inversion) and
associated paper for details and limitations.

View File

@ -301,5 +301,48 @@ summoning up the concept of some sort of scifi creature? Let's find out.
Indeed, removing the word "hybrid" produces an image that is more like what we'd
expect.
In conclusion, prompt blending is great for exploring creative space,
but takes some trial and error to achieve the desired effect.
## Dynamic Prompts
Dynamic Prompts are a powerful feature designed to produce a variety of prompts based on user-defined options. Using a special syntax, you can construct a prompt with multiple possibilities, and the system will automatically generate a series of permutations based on your settings. This is extremely beneficial for ideation, exploring various scenarios, or testing different concepts swiftly and efficiently.
### Structure of a Dynamic Prompt
A Dynamic Prompt comprises regular text, supplemented with alternatives enclosed within curly braces {} and separated by a vertical bar |. For example: {option1|option2|option3}. The system will then select one of the options to include in the final prompt. This flexible system allows options to be placed throughout the text as needed.
Furthermore, Dynamic Prompts can designate multiple selections from a single group of options. This feature is triggered by prefixing the options with a numerical value followed by $$. For example, in {2$$option1|option2|option3}, the system will select two distinct options from the set.
### Creating Dynamic Prompts
To create a Dynamic Prompt, follow these steps:
1. Draft your sentence or phrase, identifying words or phrases with multiple possible options.
2. Encapsulate the different options within curly braces {}.
3. Within the braces, separate each option using a vertical bar |.
4. If you want to include multiple options from a single group, prefix them with the desired number and $$.
For instance: A {house|apartment|lodge|cottage} in {summer|winter|autumn|spring} designed in {2$$style1|style2|style3}.
### How Dynamic Prompts Work
Once a Dynamic Prompt is configured, the system generates an array of combinations using the options provided. Each group of options in curly braces is treated independently, with the system selecting one option from each group. For a prefixed set (e.g., 2$$), the system will select two distinct options.
For example, the following prompts could be generated from the above Dynamic Prompt:
A house in summer designed in style1, style2
A lodge in autumn designed in style3, style1
A cottage in winter designed in style2, style3
And many more!
When the `Combinatorial` setting is on, Invoke will disable the "Images" selection, and generate every combination up until the setting for Max Prompts is reached.
When the `Combinatorial` setting is off, Invoke will randomly generate combinations up until the setting for Images has been reached.
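To make the expansion behaviour concrete, here is a small, self-contained Python
sketch of the syntax described above. It is not InvokeAI's actual implementation
(the exact ordering, separators and limits may differ), but it illustrates both
the combinatorial and random modes for `{a|b|c}` and `{2$$a|b|c}` groups:
```python
import itertools
import random
import re

# Matches {option1|option2} and {2$$option1|option2|option3} groups.
GROUP_RE = re.compile(r"\{(?:(\d+)\$\$)?([^{}]+)\}")


def expand(prompt: str, combinatorial: bool = True, max_prompts: int = 10) -> list:
    # For each group, precompute every allowed selection; multiple picks from
    # one group are joined with ", " as in the examples above.
    groups = []
    for count, options in GROUP_RE.findall(prompt):
        opts = options.split("|")
        n = int(count) if count else 1
        groups.append([", ".join(c) for c in itertools.combinations(opts, n)])

    def fill(choices):
        it = iter(choices)
        return GROUP_RE.sub(lambda _match: next(it), prompt)

    if combinatorial:
        # Every combination, capped at max_prompts (the "Max Prompts" setting).
        return [fill(c) for c in itertools.islice(itertools.product(*groups), max_prompts)]
    # Random sampling, one prompt per requested image.
    return [fill([random.choice(g) for g in groups]) for _ in range(max_prompts)]


print(expand("A {house|lodge|cottage} in {summer|winter} designed in {2$$style1|style2|style3}", max_prompts=3))
```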
### Tips and Tricks for Using Dynamic Prompts
Below are some useful strategies for creating Dynamic Prompts:
- Utilize Dynamic Prompts to generate a wide spectrum of prompts, perfect for brainstorming and exploring diverse ideas.
- Ensure that the options within a group are contextually relevant to the part of the sentence where they are used. For instance, group building types together, and seasons together.
- Apply the 2$$ prefix when you want to incorporate more than one option from a single group. This becomes quite handy when mixing and matching different elements.
- Experiment with different quantities for the prefix. For example, 3$$ will select three distinct options.
- Be aware of coherence in your prompts. Although the system can generate all possible combinations, not all may semantically make sense. Therefore, carefully choose the options for each group.
- Always review and fine-tune the generated prompts as needed. While Dynamic Prompts can help you generate a multitude of combinations, the final polishing and refining remain in your hands.

View File

@ -1,9 +1,10 @@
---
title: Textual-Inversion
title: Training
---
# :material-file-document: Textual Inversion
# :material-file-document: Training
# Textual Inversion Training
## **Personalizing Text-to-Image Generation**
You may personalize the generated images to provide your own styles or objects
@ -258,16 +259,6 @@ invokeai-ti \
--only_save_embeds
```
## Using Embeddings
After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.
These will be automatically loaded when you start InvokeAI.
Add the trigger word, surrounded by angle brackets, to use that embedding. For example, if your trigger word was `terence`, use `<terence>` in prompts. This is the same syntax used by the HuggingFace concepts library.
**Note:** `.pt` embeddings do not require the angle brackets.
## Troubleshooting
### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`

View File

@ -248,6 +248,7 @@ class InvokeAiInstance:
"install",
"--require-virtualenv",
"torch~=2.0.0",
"torchmetrics==0.11.4",
"torchvision>=0.14.1",
"--force-reinstall",
"--find-links" if find_links is not None else None,

View File

@ -20,7 +20,7 @@ echo 9. Update InvokeAI
echo 10. Command-line help
echo Q - Quit
set /P choice="Please enter 1-10, Q: [2] "
if not defined choice set choice=2
if not defined choice set choice=1
IF /I "%choice%" == "1" (
echo Starting the InvokeAI browser-based UI..
python .venv\Scripts\invokeai-web.exe %*
@ -56,7 +56,7 @@ IF /I "%choice%" == "1" (
call cmd /k
) ELSE IF /I "%choice%" == "9" (
echo Running invokeai-update...
python .venv\Scripts\invokeai-update.exe %*
python -m invokeai.frontend.install.invokeai_update
) ELSE IF /I "%choice%" == "10" (
echo Displaying command line help...
python .venv\Scripts\invokeai.exe --help %*

View File

@ -93,7 +93,7 @@ do_choice() {
9)
clear
printf "Update InvokeAI\n"
invokeai-update
python -m invokeai.frontend.install.invokeai_update
;;
10)
clear

View File

@ -13,10 +13,10 @@ from invokeai.app.services.board_record_storage import SqliteBoardRecordStorage
from invokeai.app.services.boards import BoardService, BoardServiceDependencies
from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
from invokeai.app.services.images import ImageService, ImageServiceDependencies
from invokeai.app.services.metadata import CoreMetadataService
from invokeai.app.services.resource_name import SimpleNameService
from invokeai.app.services.urls import LocalUrlService
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.version.invokeai_version import __version__
from ..services.default_graphs import create_system_graphs
from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@ -58,7 +58,8 @@ class ApiDependencies:
@staticmethod
def initialize(config, event_handler_id: int, logger: Logger = logger):
logger.info(f"Internet connectivity is {config.internet_available}")
logger.debug(f'InvokeAI version {__version__}')
logger.debug(f"Internet connectivity is {config.internet_available}")
events = FastAPIEventService(event_handler_id)
@ -73,7 +74,6 @@ class ApiDependencies:
)
urls = LocalUrlService()
metadata = CoreMetadataService()
image_record_storage = SqliteImageRecordStorage(db_location)
image_file_storage = DiskImageFileStorage(f"{output_folder}/images")
names = SimpleNameService()
@ -109,7 +109,6 @@ class ApiDependencies:
board_image_record_storage=board_image_record_storage,
image_record_storage=image_record_storage,
image_file_storage=image_file_storage,
metadata=metadata,
url=urls,
logger=logger,
names=names,

View File

@ -1,18 +1,36 @@
from fastapi.routing import APIRouter
from pydantic import BaseModel
from pydantic import BaseModel, Field
from invokeai.backend.image_util.patchmatch import PatchMatch
from invokeai.version import __version__
app_router = APIRouter(prefix="/v1/app", tags=['app'])
app_router = APIRouter(prefix="/v1/app", tags=["app"])
class AppVersion(BaseModel):
"""App Version Response"""
version: str
version: str = Field(description="App version")
@app_router.get('/version', operation_id="app_version",
status_code=200,
response_model=AppVersion)
class AppConfig(BaseModel):
"""App Config Response"""
infill_methods: list[str] = Field(description="List of available infill methods")
@app_router.get(
"/version", operation_id="app_version", status_code=200, response_model=AppVersion
)
async def get_version() -> AppVersion:
return AppVersion(version=__version__)
@app_router.get(
"/config", operation_id="get_config", status_code=200, response_model=AppConfig
)
async def get_config() -> AppConfig:
infill_methods = ['tile']
if PatchMatch.patchmatch_available():
infill_methods.append('patchmatch')
return AppConfig(infill_methods=infill_methods)

View File

@ -7,6 +7,7 @@ from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
from PIL import Image
from invokeai.app.invocations.metadata import ImageMetadata
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from invokeai.app.services.image_record_storage import OffsetPaginatedResults
from invokeai.app.services.item_storage import PaginatedResults
@ -104,23 +105,38 @@ async def update_image(
@images_router.get(
"/{image_name}/metadata",
operation_id="get_image_metadata",
"/{image_name}",
operation_id="get_image_dto",
response_model=ImageDTO,
)
async def get_image_metadata(
async def get_image_dto(
image_name: str = Path(description="The name of image to get"),
) -> ImageDTO:
"""Gets an image's metadata"""
"""Gets an image's DTO"""
try:
return ApiDependencies.invoker.services.images.get_dto(image_name)
except Exception as e:
raise HTTPException(status_code=404)
@images_router.get(
"/{image_name}/metadata",
operation_id="get_image_metadata",
response_model=ImageMetadata,
)
async def get_image_metadata(
image_name: str = Path(description="The name of image to get"),
) -> ImageMetadata:
"""Gets an image's metadata"""
try:
return ApiDependencies.invoker.services.images.get_metadata(image_name)
except Exception as e:
raise HTTPException(status_code=404)
@images_router.get(
"/{image_name}",
"/{image_name}/full",
operation_id="get_image_full",
response_class=Response,
responses={
@ -213,10 +229,10 @@ async def get_image_urls(
@images_router.get(
"/",
operation_id="list_images_with_metadata",
operation_id="list_image_dtos",
response_model=OffsetPaginatedResults[ImageDTO],
)
async def list_images_with_metadata(
async def list_image_dtos(
image_origin: Optional[ResourceOrigin] = Query(
default=None, description="The origin of images to list"
),
@ -232,7 +248,7 @@ async def list_images_with_metadata(
offset: int = Query(default=0, description="The page offset"),
limit: int = Query(default=10, description="The number of images per page"),
) -> OffsetPaginatedResults[ImageDTO]:
"""Gets a list of images"""
"""Gets a list of image DTOs"""
image_dtos = ApiDependencies.invoker.services.images.get_many(
offset,

View File

@ -1,5 +1,6 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
# Copyright (c) 2022-2023 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
import asyncio
import sys
from inspect import signature
import uvicorn
@ -20,6 +21,13 @@ from ..backend.util.logging import InvokeAILogger
app_config = InvokeAIAppConfig.get_config()
app_config.parse_args()
logger = InvokeAILogger.getLogger(config=app_config)
from invokeai.version.invokeai_version import __version__
# we call this early so that the message appears before
# other invokeai initialization messages
if app_config.version:
print(f'InvokeAI version {__version__}')
sys.exit(0)
import invokeai.frontend.web as web_dir
import mimetypes
@ -28,6 +36,7 @@ from .api.dependencies import ApiDependencies
from .api.routers import sessions, models, images, boards, board_images, app_info
from .api.sockets import SocketIO
from .invocations.baseinvocation import BaseInvocation
import torch
if torch.backends.mps.is_available():

View File

@ -16,6 +16,12 @@ from invokeai.backend.util.logging import InvokeAILogger
config = InvokeAIAppConfig.get_config()
config.parse_args()
logger = InvokeAILogger().getLogger(config=config)
from invokeai.version.invokeai_version import __version__
# we call this early so that the message appears before other invokeai initialization messages
if config.version:
print(f'InvokeAI version {__version__}')
sys.exit(0)
from invokeai.app.services.board_image_record_storage import (
SqliteBoardImageRecordStorage,
@ -28,7 +34,6 @@ from invokeai.app.services.board_record_storage import SqliteBoardRecordStorage
from invokeai.app.services.boards import BoardService, BoardServiceDependencies
from invokeai.app.services.image_record_storage import SqliteImageRecordStorage
from invokeai.app.services.images import ImageService, ImageServiceDependencies
from invokeai.app.services.metadata import CoreMetadataService
from invokeai.app.services.resource_name import SimpleNameService
from invokeai.app.services.urls import LocalUrlService
from .services.default_graphs import (default_text_to_image_graph_id,
@ -208,6 +213,7 @@ def invoke_all(context: CliContext):
raise SessionError()
def invoke_cli():
logger.info(f'InvokeAI version {__version__}')
# get the optional list of invocations to execute on the command line
parser = config.get_parser()
parser.add_argument('commands',nargs='*')
@ -237,7 +243,6 @@ def invoke_cli():
)
urls = LocalUrlService()
metadata = CoreMetadataService()
image_record_storage = SqliteImageRecordStorage(db_location)
image_file_storage = DiskImageFileStorage(f"{output_folder}/images")
names = SimpleNameService()
@ -270,7 +275,6 @@ def invoke_cli():
board_image_record_storage=board_image_record_storage,
image_record_storage=image_record_storage,
image_file_storage=image_file_storage,
metadata=metadata,
url=urls,
logger=logger,
names=names,

View File

@ -154,40 +154,42 @@ class InpaintInvocation(BaseInvocation):
@contextmanager
def load_model_old_way(self, context, scheduler):
def _lora_loader():
for lora in self.unet.loras:
lora_info = context.services.model_manager.get_model(
**lora.dict(exclude={"weight"}))
yield (lora_info.context.model, lora.weight)
del lora_info
return
unet_info = context.services.model_manager.get_model(**self.unet.unet.dict())
vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
#unet = unet_info.context.model
#vae = vae_info.context.model
with vae_info as vae,\
ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),\
unet_info as unet:
with ExitStack() as stack:
loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras]
device = context.services.model_manager.mgr.cache.execution_device
dtype = context.services.model_manager.mgr.cache.precision
with vae_info as vae,\
unet_info as unet,\
ModelPatcher.apply_lora_unet(unet, loras):
pipeline = StableDiffusionGeneratorPipeline(
vae=vae,
text_encoder=None,
tokenizer=None,
unet=unet,
scheduler=scheduler,
safety_checker=None,
feature_extractor=None,
requires_safety_checker=False,
precision="float16" if dtype == torch.float16 else "float32",
execution_device=device,
)
device = context.services.model_manager.mgr.cache.execution_device
dtype = context.services.model_manager.mgr.cache.precision
pipeline = StableDiffusionGeneratorPipeline(
vae=vae,
text_encoder=None,
tokenizer=None,
unet=unet,
scheduler=scheduler,
safety_checker=None,
feature_extractor=None,
requires_safety_checker=False,
precision="float16" if dtype == torch.float16 else "float32",
execution_device=device,
)
yield OldModelInfo(
name=self.unet.unet.model_name,
hash="<NO-HASH>",
model=pipeline,
)
yield OldModelInfo(
name=self.unet.unet.model_name,
hash="<NO-HASH>",
model=pipeline,
)
def invoke(self, context: InvocationContext) -> ImageOutput:
image = (
@ -226,21 +228,21 @@ class InpaintInvocation(BaseInvocation):
), # Shorthand for passing all of the parameters above manually
)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
# each time it is called. We only need the first one.
generator_output = next(outputs)
# Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
# each time it is called. We only need the first one.
generator_output = next(outputs)
image_dto = context.services.images.create(
image=generator_output.image,
image_origin=ResourceOrigin.INTERNAL,
image_category=ImageCategory.GENERAL,
session_id=context.graph_execution_state_id,
node_id=self.id,
is_intermediate=self.is_intermediate,
)
image_dto = context.services.images.create(
image=generator_output.image,
image_origin=ResourceOrigin.INTERNAL,
image_category=ImageCategory.GENERAL,
session_id=context.graph_execution_state_id,
node_id=self.id,
is_intermediate=self.is_intermediate,
)
return ImageOutput(
image=ImageField(image_name=image_dto.image_name),
width=image_dto.width,
height=image_dto.height,
)
return ImageOutput(
image=ImageField(image_name=image_dto.image_name),
width=image_dto.width,
height=image_dto.height,
)

View File

@ -9,9 +9,9 @@ from diffusers.image_processor import VaeImageProcessor
from diffusers.schedulers import SchedulerMixin as Scheduler
from pydantic import BaseModel, Field, validator
from invokeai.app.invocations.metadata import CoreMetadata
from invokeai.app.util.step_callback import stable_diffusion_step_callback
from ..models.image import ImageCategory, ImageField, ResourceOrigin
from ...backend.model_management.lora import ModelPatcher
from ...backend.stable_diffusion import PipelineIntermediateState
from ...backend.stable_diffusion.diffusers_pipeline import (
@ -21,6 +21,7 @@ from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import \
PostprocessingSettings
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import torch_dtype
from ..models.image import ImageCategory, ImageField, ResourceOrigin
from .baseinvocation import (BaseInvocation, BaseInvocationOutput,
InvocationConfig, InvocationContext)
from .compel import ConditioningField
@ -449,6 +450,8 @@ class LatentsToImageInvocation(BaseInvocation):
tiled: bool = Field(
default=False,
description="Decode latents by overlaping tiles(less memory consumption)")
metadata: Optional[CoreMetadata] = Field(default=None, description="Optional core metadata to be written to the image")
# Schema customisation
class Config(InvocationConfig):
@ -493,7 +496,8 @@ class LatentsToImageInvocation(BaseInvocation):
image_category=ImageCategory.GENERAL,
node_id=self.id,
session_id=context.graph_execution_state_id,
is_intermediate=self.is_intermediate
is_intermediate=self.is_intermediate,
metadata=self.metadata.dict() if self.metadata else None,
)
return ImageOutput(
@ -515,9 +519,9 @@ class ResizeLatentsInvocation(BaseInvocation):
# Inputs
latents: Optional[LatentsField] = Field(
description="The latents to resize")
width: int = Field(
width: Union[int, None] = Field(default=512,
ge=64, multiple_of=8, description="The width to resize to (px)")
height: int = Field(
height: Union[int, None] = Field(default=512,
ge=64, multiple_of=8, description="The height to resize to (px)")
mode: LATENTS_INTERPOLATION_MODE = Field(
default="bilinear", description="The interpolation mode")

View File

@ -0,0 +1,124 @@
from typing import Literal, Optional, Union
from pydantic import BaseModel, Field
from invokeai.app.invocations.baseinvocation import (BaseInvocation,
BaseInvocationOutput,
InvocationContext)
from invokeai.app.invocations.controlnet_image_processors import ControlField
from invokeai.app.invocations.model import (LoRAModelField, MainModelField,
VAEModelField)
class LoRAMetadataField(BaseModel):
"""LoRA metadata for an image generated in InvokeAI."""
lora: LoRAModelField = Field(description="The LoRA model")
weight: float = Field(description="The weight of the LoRA model")
class CoreMetadata(BaseModel):
"""Core generation metadata for an image generated in InvokeAI."""
generation_mode: str = Field(description="The generation mode that output this image",)
positive_prompt: str = Field(description="The positive prompt parameter")
negative_prompt: str = Field(description="The negative prompt parameter")
width: int = Field(description="The width parameter")
height: int = Field(description="The height parameter")
seed: int = Field(description="The seed used for noise generation")
rand_device: str = Field(description="The device used for random number generation")
cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
steps: int = Field(description="The number of steps used for inference")
scheduler: str = Field(description="The scheduler used for inference")
clip_skip: int = Field(description="The number of skipped CLIP layers",)
model: MainModelField = Field(description="The main model used for inference")
controlnets: list[ControlField]= Field(description="The ControlNets used for inference")
loras: list[LoRAMetadataField] = Field(description="The LoRAs used for inference")
strength: Union[float, None] = Field(
default=None,
description="The strength used for latents-to-latents",
)
init_image: Union[str, None] = Field(
default=None, description="The name of the initial image"
)
vae: Union[VAEModelField, None] = Field(
default=None,
description="The VAE used for decoding, if the main model's default was not used",
)
class ImageMetadata(BaseModel):
"""An image's generation metadata"""
metadata: Optional[dict] = Field(
default=None,
description="The image's core metadata, if it was created in the Linear or Canvas UI",
)
graph: Optional[dict] = Field(
default=None, description="The graph that created the image"
)
class MetadataAccumulatorOutput(BaseInvocationOutput):
"""The output of the MetadataAccumulator node"""
type: Literal["metadata_accumulator_output"] = "metadata_accumulator_output"
metadata: CoreMetadata = Field(description="The core metadata for the image")
class MetadataAccumulatorInvocation(BaseInvocation):
"""Outputs a Core Metadata Object"""
type: Literal["metadata_accumulator"] = "metadata_accumulator"
generation_mode: str = Field(description="The generation mode that output this image",)
positive_prompt: str = Field(description="The positive prompt parameter")
negative_prompt: str = Field(description="The negative prompt parameter")
width: int = Field(description="The width parameter")
height: int = Field(description="The height parameter")
seed: int = Field(description="The seed used for noise generation")
rand_device: str = Field(description="The device used for random number generation")
cfg_scale: float = Field(description="The classifier-free guidance scale parameter")
steps: int = Field(description="The number of steps used for inference")
scheduler: str = Field(description="The scheduler used for inference")
clip_skip: int = Field(description="The number of skipped CLIP layers",)
model: MainModelField = Field(description="The main model used for inference")
controlnets: list[ControlField]= Field(description="The ControlNets used for inference")
loras: list[LoRAMetadataField] = Field(description="The LoRAs used for inference")
strength: Union[float, None] = Field(
default=None,
description="The strength used for latents-to-latents",
)
init_image: Union[str, None] = Field(
default=None, description="The name of the initial image"
)
vae: Union[VAEModelField, None] = Field(
default=None,
description="The VAE used for decoding, if the main model's default was not used",
)
def invoke(self, context: InvocationContext) -> MetadataAccumulatorOutput:
"""Collects and outputs a CoreMetadata object"""
return MetadataAccumulatorOutput(
metadata=CoreMetadata(
generation_mode=self.generation_mode,
positive_prompt=self.positive_prompt,
negative_prompt=self.negative_prompt,
width=self.width,
height=self.height,
seed=self.seed,
rand_device=self.rand_device,
cfg_scale=self.cfg_scale,
steps=self.steps,
scheduler=self.scheduler,
model=self.model,
strength=self.strength,
init_image=self.init_image,
vae=self.vae,
controlnets=self.controlnets,
loras=self.loras,
clip_skip=self.clip_skip,
)
)

View File

@ -1,93 +0,0 @@
from typing import Optional, Union, List
from pydantic import BaseModel, Extra, Field, StrictFloat, StrictInt, StrictStr
class ImageMetadata(BaseModel):
"""
Core generation metadata for an image/tensor generated in InvokeAI.
Also includes any metadata from the image's PNG tEXt chunks.
Generated by traversing the execution graph, collecting the parameters of the nearest ancestors
of a given node.
Full metadata may be accessed by querying for the session in the `graph_executions` table.
"""
class Config:
extra = Extra.allow
"""
This lets the ImageMetadata class accept arbitrary additional fields. The CoreMetadataService
won't add any fields that are not already defined, but other a different metadata service
implementation might.
"""
type: Optional[StrictStr] = Field(
default=None,
description="The type of the ancestor node of the image output node.",
)
"""The type of the ancestor node of the image output node."""
positive_conditioning: Optional[StrictStr] = Field(
default=None, description="The positive conditioning."
)
"""The positive conditioning"""
negative_conditioning: Optional[StrictStr] = Field(
default=None, description="The negative conditioning."
)
"""The negative conditioning"""
width: Optional[StrictInt] = Field(
default=None, description="Width of the image/latents in pixels."
)
"""Width of the image/latents in pixels"""
height: Optional[StrictInt] = Field(
default=None, description="Height of the image/latents in pixels."
)
"""Height of the image/latents in pixels"""
seed: Optional[StrictInt] = Field(
default=None, description="The seed used for noise generation."
)
"""The seed used for noise generation"""
# cfg_scale: Optional[StrictFloat] = Field(
# cfg_scale: Union[float, list[float]] = Field(
cfg_scale: Union[StrictFloat, List[StrictFloat]] = Field(
default=None, description="The classifier-free guidance scale."
)
"""The classifier-free guidance scale"""
steps: Optional[StrictInt] = Field(
default=None, description="The number of steps used for inference."
)
"""The number of steps used for inference"""
scheduler: Optional[StrictStr] = Field(
default=None, description="The scheduler used for inference."
)
"""The scheduler used for inference"""
model: Optional[StrictStr] = Field(
default=None, description="The model used for inference."
)
"""The model used for inference"""
strength: Optional[StrictFloat] = Field(
default=None,
description="The strength used for image-to-image/latents-to-latents.",
)
"""The strength used for image-to-image/latents-to-latents."""
latents: Optional[StrictStr] = Field(
default=None, description="The ID of the initial latents."
)
"""The ID of the initial latents"""
vae: Optional[StrictStr] = Field(
default=None, description="The VAE used for decoding."
)
"""The VAE used for decoding"""
unet: Optional[StrictStr] = Field(
default=None, description="The UNet used dor inference."
)
"""The UNet used dor inference"""
clip: Optional[StrictStr] = Field(
default=None, description="The CLIP Encoder used for conditioning."
)
"""The CLIP Encoder used for conditioning"""
extra: Optional[StrictStr] = Field(
default=None,
description="Uploaded image metadata, extracted from the PNG tEXt chunk.",
)
"""Uploaded image metadata, extracted from the PNG tEXt chunk."""

View File

@ -23,7 +23,8 @@ InvokeAI:
xformers_enabled: false
sequential_guidance: false
precision: float16
max_loaded_models: 4
max_cache_size: 6
max_vram_cache_size: 2.7
always_use_cpu: false
free_gpu_mem: false
Features:
@ -168,7 +169,7 @@ from argparse import ArgumentParser
from omegaconf import OmegaConf, DictConfig
from pathlib import Path
from pydantic import BaseSettings, Field, parse_obj_as
from typing import ClassVar, Dict, List, Literal, Union, get_origin, get_type_hints, get_args
from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args
INIT_FILE = Path('invokeai.yaml')
MODEL_CORE = Path('models/core')
@ -270,7 +271,8 @@ class InvokeAISettings(BaseSettings):
@classmethod
def _excluded(self)->List[str]:
return ['type','initconf']
# combination of deprecated parameters and internal ones
return ['type','initconf', 'gpu_mem_reserved', 'max_loaded_models', 'version']
class Config:
env_file_encoding = 'utf-8'
@ -363,8 +365,10 @@ setting environment variables INVOKEAI_<setting>.
always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
max_loaded_models : int = Field(default=3, gt=0, description="(DEPRECATED: use max_cache_size) Maximum number of models to keep in memory for rapid switching", category='Memory/Performance')
max_loaded_models : int = Field(default=3, gt=0, description="(DEPRECATED: use max_cache_size) Maximum number of models to keep in memory for rapid switching", category='DEPRECATED')
max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
max_vram_cache_size : float = Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
gpu_mem_reserved : float = Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16',description='Floating point precision', category='Memory/Performance')
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
@ -389,6 +393,8 @@ setting environment variables INVOKEAI_<setting>.
# note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
log_format : Literal[tuple(['plain','color','syslog','legacy'])] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', category="Logging")
log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging")
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
#fmt: on
def parse_args(self, argv: List[str]=None, conf: DictConfig = None, clobber=False):

View File

@ -1,14 +1,14 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654) and the InvokeAI Team
import json
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
from typing import Dict, Optional, Union
from PIL.Image import Image as PILImageType
from PIL import Image, PngImagePlugin
from PIL.Image import Image as PILImageType
from send2trash import send2trash
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
@ -59,7 +59,8 @@ class ImageFileStorageBase(ABC):
self,
image: PILImageType,
image_name: str,
metadata: Optional[ImageMetadata] = None,
metadata: Optional[dict] = None,
graph: Optional[dict] = None,
thumbnail_size: int = 256,
) -> None:
"""Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image name, thumbnail name, and created timestamp."""
@ -110,20 +111,22 @@ class DiskImageFileStorage(ImageFileStorageBase):
self,
image: PILImageType,
image_name: str,
metadata: Optional[ImageMetadata] = None,
metadata: Optional[dict] = None,
graph: Optional[dict] = None,
thumbnail_size: int = 256,
) -> None:
try:
self.__validate_storage_folders()
image_path = self.get_path(image_name)
pnginfo = PngImagePlugin.PngInfo()
if metadata is not None:
pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text("invokeai", metadata.json())
image.save(image_path, "PNG", pnginfo=pnginfo)
else:
image.save(image_path, "PNG")
pnginfo.add_text("invokeai_metadata", json.dumps(metadata))
if graph is not None:
pnginfo.add_text("invokeai_graph", json.dumps(graph))
image.save(image_path, "PNG", pnginfo=pnginfo)
thumbnail_name = get_thumbnail_name(image_name)
thumbnail_path = self.get_path(thumbnail_name, thumbnail=True)
thumbnail_image = make_thumbnail(image, thumbnail_size)

View File

@ -1,22 +1,16 @@
import json
import sqlite3
import threading
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Generic, Optional, TypeVar, cast
import sqlite3
import threading
from pydantic import BaseModel, Field
from pydantic.generics import GenericModel
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.models.image import (
ImageCategory,
ResourceOrigin,
)
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from invokeai.app.services.models.image_record import (
ImageRecord,
ImageRecordChanges,
deserialize_image_record,
)
ImageRecord, ImageRecordChanges, deserialize_image_record)
T = TypeVar("T", bound=BaseModel)
@ -54,6 +48,28 @@ class ImageRecordDeleteException(Exception):
super().__init__(message)
IMAGE_DTO_COLS = ", ".join(
list(
map(
lambda c: "images." + c,
[
"image_name",
"image_origin",
"image_category",
"width",
"height",
"session_id",
"node_id",
"is_intermediate",
"created_at",
"updated_at",
"deleted_at",
],
)
)
)
class ImageRecordStorageBase(ABC):
"""Low-level service responsible for interfacing with the image record store."""
@ -64,6 +80,11 @@ class ImageRecordStorageBase(ABC):
"""Gets an image record."""
pass
@abstractmethod
def get_metadata(self, image_name: str) -> Optional[dict]:
"""Gets an image's metadata'."""
pass
@abstractmethod
def update(
self,
@ -108,7 +129,7 @@ class ImageRecordStorageBase(ABC):
height: int,
session_id: Optional[str],
node_id: Optional[str],
metadata: Optional[ImageMetadata],
metadata: Optional[dict],
is_intermediate: bool = False,
) -> datetime:
"""Saves an image record."""
@ -162,7 +183,6 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
node_id TEXT,
metadata TEXT,
is_intermediate BOOLEAN DEFAULT FALSE,
board_id TEXT,
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
-- Updated via trigger
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
@ -213,7 +233,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
self._cursor.execute(
f"""--sql
SELECT * FROM images
SELECT {IMAGE_DTO_COLS} FROM images
WHERE image_name = ?;
""",
(image_name,),
@ -231,6 +251,28 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
return deserialize_image_record(dict(result))
def get_metadata(self, image_name: str) -> Optional[dict]:
try:
self._lock.acquire()
self._cursor.execute(
f"""--sql
SELECT images.metadata FROM images
WHERE image_name = ?;
""",
(image_name,),
)
result = cast(Optional[sqlite3.Row], self._cursor.fetchone())
if not result or not result[0]:
return None
return json.loads(result[0])
except sqlite3.Error as e:
self._conn.rollback()
raise ImageRecordNotFoundException from e
finally:
self._lock.release()
def update(
self,
image_name: str,
@ -298,8 +340,8 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
WHERE 1=1
"""
images_query = """--sql
SELECT images.*
images_query = f"""--sql
SELECT {IMAGE_DTO_COLS}
FROM images
LEFT JOIN board_images ON board_images.image_name = images.image_name
WHERE 1=1
@ -417,12 +459,12 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
width: int,
height: int,
node_id: Optional[str],
metadata: Optional[ImageMetadata],
metadata: Optional[dict],
is_intermediate: bool = False,
) -> datetime:
try:
metadata_json = (
None if metadata is None else metadata.json(exclude_none=True)
None if metadata is None else json.dumps(metadata)
)
self._lock.acquire()
self._cursor.execute(
@ -472,9 +514,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
finally:
self._lock.release()
def get_most_recent_image_for_board(
self, board_id: str
) -> Optional[ImageRecord]:
def get_most_recent_image_for_board(self, board_id: str) -> Optional[ImageRecord]:
try:
self._lock.acquire()
self._cursor.execute(

View File

@ -1,39 +1,30 @@
import json
from abc import ABC, abstractmethod
from logging import Logger
from typing import Optional, TYPE_CHECKING, Union
from typing import TYPE_CHECKING, Optional
from PIL.Image import Image as PILImageType
from invokeai.app.models.image import (
ImageCategory,
ResourceOrigin,
InvalidImageCategoryException,
InvalidOriginException,
)
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.services.board_image_record_storage import BoardImageRecordStorageBase
from invokeai.app.services.image_record_storage import (
ImageRecordDeleteException,
ImageRecordNotFoundException,
ImageRecordSaveException,
ImageRecordStorageBase,
OffsetPaginatedResults,
)
from invokeai.app.services.models.image_record import (
ImageRecord,
ImageDTO,
ImageRecordChanges,
image_record_to_dto,
)
from invokeai.app.invocations.metadata import ImageMetadata
from invokeai.app.models.image import (ImageCategory,
InvalidImageCategoryException,
InvalidOriginException, ResourceOrigin)
from invokeai.app.services.board_image_record_storage import \
BoardImageRecordStorageBase
from invokeai.app.services.graph import Graph
from invokeai.app.services.image_file_storage import (
ImageFileDeleteException,
ImageFileNotFoundException,
ImageFileSaveException,
ImageFileStorageBase,
)
from invokeai.app.services.item_storage import ItemStorageABC, PaginatedResults
from invokeai.app.services.metadata import MetadataServiceBase
ImageFileDeleteException, ImageFileNotFoundException,
ImageFileSaveException, ImageFileStorageBase)
from invokeai.app.services.image_record_storage import (
ImageRecordDeleteException, ImageRecordNotFoundException,
ImageRecordSaveException, ImageRecordStorageBase, OffsetPaginatedResults)
from invokeai.app.services.item_storage import ItemStorageABC
from invokeai.app.services.models.image_record import (ImageDTO, ImageRecord,
ImageRecordChanges,
image_record_to_dto)
from invokeai.app.services.resource_name import NameServiceBase
from invokeai.app.services.urls import UrlServiceBase
from invokeai.app.util.metadata import get_metadata_graph_from_raw_session
if TYPE_CHECKING:
from invokeai.app.services.graph import GraphExecutionState
@ -51,6 +42,7 @@ class ImageServiceABC(ABC):
node_id: Optional[str] = None,
session_id: Optional[str] = None,
is_intermediate: bool = False,
metadata: Optional[dict] = None,
) -> ImageDTO:
"""Creates an image, storing the file and its metadata."""
pass
@ -79,6 +71,11 @@ class ImageServiceABC(ABC):
"""Gets an image DTO."""
pass
@abstractmethod
def get_metadata(self, image_name: str) -> ImageMetadata:
"""Gets an image's metadata."""
pass
@abstractmethod
def get_path(self, image_name: str, thumbnail: bool = False) -> str:
"""Gets an image's path."""
@ -124,7 +121,6 @@ class ImageServiceDependencies:
image_records: ImageRecordStorageBase
image_files: ImageFileStorageBase
board_image_records: BoardImageRecordStorageBase
metadata: MetadataServiceBase
urls: UrlServiceBase
logger: Logger
names: NameServiceBase
@ -135,7 +131,6 @@ class ImageServiceDependencies:
image_record_storage: ImageRecordStorageBase,
image_file_storage: ImageFileStorageBase,
board_image_record_storage: BoardImageRecordStorageBase,
metadata: MetadataServiceBase,
url: UrlServiceBase,
logger: Logger,
names: NameServiceBase,
@ -144,7 +139,6 @@ class ImageServiceDependencies:
self.image_records = image_record_storage
self.image_files = image_file_storage
self.board_image_records = board_image_record_storage
self.metadata = metadata
self.urls = url
self.logger = logger
self.names = names
@ -165,6 +159,7 @@ class ImageService(ImageServiceABC):
node_id: Optional[str] = None,
session_id: Optional[str] = None,
is_intermediate: bool = False,
metadata: Optional[dict] = None,
) -> ImageDTO:
if image_origin not in ResourceOrigin:
raise InvalidOriginException
@ -174,7 +169,16 @@ class ImageService(ImageServiceABC):
image_name = self._services.names.create_image_name()
metadata = self._get_metadata(session_id, node_id)
graph = None
if session_id is not None:
session_raw = self._services.graph_execution_manager.get_raw(session_id)
if session_raw is not None:
try:
graph = get_metadata_graph_from_raw_session(session_raw)
except Exception as e:
self._services.logger.warn(f"Failed to parse session graph: {e}")
graph = None
(width, height) = image.size
@ -191,14 +195,12 @@ class ImageService(ImageServiceABC):
is_intermediate=is_intermediate,
# Nullable fields
node_id=node_id,
session_id=session_id,
metadata=metadata,
session_id=session_id,
)
self._services.image_files.save(
image_name=image_name,
image=image,
metadata=metadata,
image_name=image_name, image=image, metadata=metadata, graph=graph
)
image_dto = self.get_dto(image_name)
@ -268,6 +270,34 @@ class ImageService(ImageServiceABC):
self._services.logger.error("Problem getting image DTO")
raise e
def get_metadata(self, image_name: str) -> Optional[ImageMetadata]:
try:
image_record = self._services.image_records.get(image_name)
if not image_record.session_id:
return ImageMetadata()
session_raw = self._services.graph_execution_manager.get_raw(
image_record.session_id
)
graph = None
if session_raw:
try:
graph = get_metadata_graph_from_raw_session(session_raw)
except Exception as e:
self._services.logger.warn(f"Failed to parse session graph: {e}")
graph = None
metadata = self._services.image_records.get_metadata(image_name)
return ImageMetadata(graph=graph, metadata=metadata)
except ImageRecordNotFoundException:
self._services.logger.error("Image record not found")
raise
except Exception as e:
self._services.logger.error("Problem getting image DTO")
raise e
def get_path(self, image_name: str, thumbnail: bool = False) -> str:
try:
return self._services.image_files.get_path(image_name, thumbnail)
@ -367,15 +397,3 @@ class ImageService(ImageServiceABC):
except Exception as e:
self._services.logger.error("Problem deleting image records and files")
raise e
def _get_metadata(
self, session_id: Optional[str] = None, node_id: Optional[str] = None
) -> Optional[ImageMetadata]:
"""Get the metadata for a node."""
metadata = None
if node_id is not None and session_id is not None:
session = self._services.graph_execution_manager.get(session_id)
metadata = self._services.metadata.create_image_metadata(session, node_id)
return metadata

View File

@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
from typing import Callable, Generic, TypeVar
from typing import Callable, Generic, Optional, TypeVar
from pydantic import BaseModel, Field
from pydantic.generics import GenericModel
@ -29,14 +29,22 @@ class ItemStorageABC(ABC, Generic[T]):
@abstractmethod
def get(self, item_id: str) -> T:
"""Gets the item, parsing it into a Pydantic model"""
pass
@abstractmethod
def get_raw(self, item_id: str) -> Optional[str]:
"""Gets the raw item as a string, skipping Pydantic parsing"""
pass
@abstractmethod
def set(self, item: T) -> None:
"""Sets the item"""
pass
@abstractmethod
def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
"""Gets a paginated list of items"""
pass
@abstractmethod

View File

@ -1,142 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, Optional
import networkx as nx
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.services.graph import Graph, GraphExecutionState
class MetadataServiceBase(ABC):
"""Handles building metadata for nodes, images, and outputs."""
@abstractmethod
def create_image_metadata(
self, session: GraphExecutionState, node_id: str
) -> ImageMetadata:
"""Builds an ImageMetadata object for a node."""
pass
class CoreMetadataService(MetadataServiceBase):
_ANCESTOR_TYPES = ["t2l", "l2l"]
"""The ancestor types that contain the core metadata"""
_ANCESTOR_PARAMS = ["type", "steps", "model", "cfg_scale", "scheduler", "strength"]
"""The core metadata parameters in the ancestor types"""
_NOISE_FIELDS = ["seed", "width", "height"]
"""The core metadata parameters in the noise node"""
def create_image_metadata(
self, session: GraphExecutionState, node_id: str
) -> ImageMetadata:
metadata = self._build_metadata_from_graph(session, node_id)
return metadata
def _find_nearest_ancestor(self, G: nx.DiGraph, node_id: str) -> Optional[str]:
"""
Finds the id of the nearest ancestor (of a valid type) of a given node.
Parameters:
G (nx.DiGraph): The execution graph, converted in to a networkx DiGraph. Its nodes must
have the same data as the execution graph.
node_id (str): The ID of the node.
Returns:
str | None: The ID of the nearest ancestor, or None if there are no valid ancestors.
"""
# Retrieve the node from the graph
node = G.nodes[node_id]
# If the node type is one of the core metadata node types, return its id
if node.get("type") in self._ANCESTOR_TYPES:
return node.get("id")
# Else, look for the ancestor in the predecessor nodes
for predecessor in G.predecessors(node_id):
result = self._find_nearest_ancestor(G, predecessor)
if result:
return result
# If there are no valid ancestors, return None
return None
def _get_additional_metadata(
self, graph: Graph, node_id: str
) -> Optional[dict[str, Any]]:
"""
Returns additional metadata for a given node.
Parameters:
graph (Graph): The execution graph.
node_id (str): The ID of the node.
Returns:
dict[str, Any] | None: A dictionary of additional metadata.
"""
metadata = {}
# Iterate over all edges in the graph
for edge in graph.edges:
dest_node_id = edge.destination.node_id
dest_field = edge.destination.field
source_node_dict = graph.nodes[edge.source.node_id].dict()
# If the destination node ID matches the given node ID, gather necessary metadata
if dest_node_id == node_id:
# Prompt
if dest_field == "positive_conditioning":
metadata["positive_conditioning"] = source_node_dict.get("prompt")
# Negative prompt
if dest_field == "negative_conditioning":
metadata["negative_conditioning"] = source_node_dict.get("prompt")
# Seed, width and height
if dest_field == "noise":
for field in self._NOISE_FIELDS:
metadata[field] = source_node_dict.get(field)
return metadata
def _build_metadata_from_graph(
self, session: GraphExecutionState, node_id: str
) -> ImageMetadata:
"""
Builds an ImageMetadata object for a node.
Parameters:
session (GraphExecutionState): The session.
node_id (str): The ID of the node.
Returns:
ImageMetadata: The metadata for the node.
"""
# We need to do all the traversal on the execution graph
graph = session.execution_graph
# Find the nearest `t2l`/`l2l` ancestor of the given node
ancestor_id = self._find_nearest_ancestor(graph.nx_graph_with_data(), node_id)
# If no ancestor was found, return an empty ImageMetadata object
if ancestor_id is None:
return ImageMetadata()
ancestor_node = graph.get_node(ancestor_id)
# Grab all the core metadata from the ancestor node
ancestor_metadata = {
param: val
for param, val in ancestor_node.dict().items()
if param in self._ANCESTOR_PARAMS
}
# Get this image's prompts and noise parameters
addl_metadata = self._get_additional_metadata(graph, ancestor_id)
# If additional metadata was found, add it to the main metadata
if addl_metadata is not None:
ancestor_metadata.update(addl_metadata)
return ImageMetadata(**ancestor_metadata)
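
This removed service walked the execution graph upward to find the nearest t2l/l2l ancestor. A standalone sketch of that traversal using plain networkx follows; the node IDs and types are invented for illustration, while the shipped code read them off the real execution graph.

import networkx as nx

def find_nearest_ancestor(G: nx.DiGraph, node_id: str, valid_types=("t2l", "l2l")):
    # Return the node itself if it already carries core metadata...
    if G.nodes[node_id].get("type") in valid_types:
        return node_id
    # ...otherwise recurse through its predecessors until one is found.
    for predecessor in G.predecessors(node_id):
        found = find_nearest_ancestor(G, predecessor, valid_types)
        if found:
            return found
    return None

G = nx.DiGraph()
G.add_node("noise", type="noise")
G.add_node("latents", type="t2l")
G.add_node("image", type="l2i")
G.add_edges_from([("noise", "latents"), ("latents", "image")])
assert find_nearest_ancestor(G, "image") == "latents"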

View File

@ -258,9 +258,7 @@ class ModelManagerService(ModelManagerServiceBase):
config_file = config.model_conf_path
else:
config_file = config.root_dir / "configs/models.yaml"
if not config_file.exists():
raise IOError(f"The file {config_file} could not be found.")
logger.debug(f'config file={config_file}')
device = torch.device(choose_torch_device())

View File

@ -1,13 +1,14 @@
import datetime
from typing import Optional, Union
from pydantic import BaseModel, Extra, Field, StrictBool, StrictStr
from invokeai.app.models.image import ImageCategory, ResourceOrigin
from invokeai.app.models.metadata import ImageMetadata
from invokeai.app.util.misc import get_iso_timestamp
class ImageRecord(BaseModel):
"""Deserialized image record."""
"""Deserialized image record without metadata."""
image_name: str = Field(description="The unique name of the image.")
"""The unique name of the image."""
@ -43,11 +44,6 @@ class ImageRecord(BaseModel):
description="The node ID that generated this image, if it is a generated image.",
)
"""The node ID that generated this image, if it is a generated image."""
metadata: Optional[ImageMetadata] = Field(
default=None,
description="A limited subset of the image's generation metadata. Retrieve the image's session for full metadata.",
)
"""A limited subset of the image's generation metadata. Retrieve the image's session for full metadata."""
class ImageRecordChanges(BaseModel, extra=Extra.forbid):
@ -112,6 +108,7 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord:
# Retrieve all the values, setting "reasonable" defaults if they are not present.
# TODO: do we really need to handle default values here? ideally the data is the correct shape...
image_name = image_dict.get("image_name", "unknown")
image_origin = ResourceOrigin(
image_dict.get("image_origin", ResourceOrigin.INTERNAL.value)
@ -128,13 +125,6 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord:
deleted_at = image_dict.get("deleted_at", get_iso_timestamp())
is_intermediate = image_dict.get("is_intermediate", False)
raw_metadata = image_dict.get("metadata")
if raw_metadata is not None:
metadata = ImageMetadata.parse_raw(raw_metadata)
else:
metadata = None
return ImageRecord(
image_name=image_name,
image_origin=image_origin,
@ -143,7 +133,6 @@ def deserialize_image_record(image_dict: dict) -> ImageRecord:
height=height,
session_id=session_id,
node_id=node_id,
metadata=metadata,
created_at=created_at,
updated_at=updated_at,
deleted_at=deleted_at,

View File

@ -104,6 +104,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
except Exception as e:
error = traceback.format_exc()
logger.error(error)
# Save error
graph_execution_state.set_node_error(invocation.id, error)

View File

@ -1,6 +1,6 @@
import sqlite3
from threading import Lock
from typing import Generic, TypeVar, Optional, Union, get_args
from typing import Generic, Optional, TypeVar, get_args
from pydantic import BaseModel, parse_raw_as
@ -78,6 +78,21 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
return self._parse_item(result[0])
def get_raw(self, id: str) -> Optional[str]:
try:
self._lock.acquire()
self._cursor.execute(
f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),)
)
result = self._cursor.fetchone()
finally:
self._lock.release()
if not result:
return None
return result[0]
def delete(self, id: str):
try:
self._lock.acquire()
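
The new get_raw accessor returns the stored JSON string untouched, while get still runs it through Pydantic; conceptually the difference is one parse/validation step. The Item model and payload below are made-up stand-ins.

from pydantic import BaseModel

class Item(BaseModel):
    id: str
    value: int

raw = '{"id": "abc", "value": 3}'   # what get_raw() would return for this row
item = Item.parse_raw(raw)          # the extra step that get() performs on the same string
assert item.value == 3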

View File

@ -22,4 +22,4 @@ class LocalUrlService(UrlServiceBase):
if thumbnail:
return f"{self._base_url}/images/{image_basename}/thumbnail"
return f"{self._base_url}/images/{image_basename}"
return f"{self._base_url}/images/{image_basename}/full"

View File

@ -0,0 +1,55 @@
import json
from typing import Optional
from pydantic import ValidationError
from invokeai.app.services.graph import Edge
def get_metadata_graph_from_raw_session(session_raw: str) -> Optional[dict]:
"""
Parses raw session string, returning a dict of the graph.
Only the general graph shape is validated; none of the fields are validated.
Any `metadata_accumulator` nodes and edges are removed.
Any validation failure will return None.
"""
graph = json.loads(session_raw).get("graph", None)
# sanity check make sure the graph is at least reasonably shaped
if (
type(graph) is not dict
or "nodes" not in graph
or type(graph["nodes"]) is not dict
or "edges" not in graph
or type(graph["edges"]) is not list
):
# something has gone terribly awry, return an empty dict
return None
try:
# delete the `metadata_accumulator` node
del graph["nodes"]["metadata_accumulator"]
except KeyError:
# no accumulator node, all good
pass
# delete any edges to or from it
for i, edge in enumerate(graph["edges"]):
try:
# try to parse the edge
Edge(**edge)
except ValidationError:
# something has gone terribly awry, return an empty dict
return None
if (
edge["source"]["node_id"] == "metadata_accumulator"
or edge["destination"]["node_id"] == "metadata_accumulator"
):
del graph["edges"][i]
return graph
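
A rough sketch of the new helper's contract: it takes the raw session JSON and gives back the graph dict with the metadata_accumulator node and its edges stripped. The session payload below is a minimal made-up example, and the call is left commented because this snippet does not import the helper.

import json

raw_session = json.dumps({
    "graph": {
        "nodes": {
            "noise": {"id": "noise", "type": "noise"},
            "metadata_accumulator": {"id": "metadata_accumulator", "type": "metadata_accumulator"},
        },
        "edges": [{
            "source": {"node_id": "metadata_accumulator", "field": "metadata"},
            "destination": {"node_id": "noise", "field": "metadata"},
        }],
    }
})

# get_metadata_graph_from_raw_session(raw_session)
# -> {"nodes": {"noise": {...}}, "edges": []}   (accumulator node and its edge removed)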

View File

@ -121,8 +121,8 @@ class ModelInstall(object):
installed_models = self.mgr.list_models()
for md in installed_models:
base = md['base_model']
model_type = md['type']
name = md['name']
model_type = md['model_type']
name = md['model_name']
key = ModelManager.create_key(name, base, model_type)
if key in model_dict:
model_dict[key].installed = True

View File

@ -36,6 +36,9 @@ from .models import BaseModelType, ModelType, SubModelType, ModelBase
# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
DEFAULT_MAX_CACHE_SIZE = 6.0
# amount of GPU memory to hold in reserve for use by generations (GB)
DEFAULT_MAX_VRAM_CACHE_SIZE= 2.75
# actual size of a gig
GIG = 1073741824
@ -82,6 +85,7 @@ class ModelCache(object):
def __init__(
self,
max_cache_size: float=DEFAULT_MAX_CACHE_SIZE,
max_vram_cache_size: float=DEFAULT_MAX_VRAM_CACHE_SIZE,
execution_device: torch.device=torch.device('cuda'),
storage_device: torch.device=torch.device('cpu'),
precision: torch.dtype=torch.float16,
@ -99,12 +103,11 @@ class ModelCache(object):
:param sequential_offload: Conserve VRAM by loading and unloading each stage of the pipeline sequentially
:param sha_chunksize: Chunksize to use when calculating sha256 model hash
'''
#max_cache_size = 9999
self.model_infos: Dict[str, ModelBase] = dict()
self.lazy_offloading = lazy_offloading
#self.sequential_offload: bool=sequential_offload
self.precision: torch.dtype=precision
self.max_cache_size: int=max_cache_size
self.max_cache_size: float=max_cache_size
self.max_vram_cache_size: float=max_vram_cache_size
self.execution_device: torch.device=execution_device
self.storage_device: torch.device=storage_device
self.sha_chunksize=sha_chunksize
@ -201,14 +204,22 @@ class ModelCache(object):
self._cache_stack.remove(key)
self._cache_stack.append(key)
return self.ModelLocker(self, key, cache_entry.model, gpu_load)
return self.ModelLocker(self, key, cache_entry.model, gpu_load, cache_entry.size)
class ModelLocker(object):
def __init__(self, cache, key, model, gpu_load):
def __init__(self, cache, key, model, gpu_load, size_needed):
'''
:param cache: The model_cache object
:param key: The key of the model to lock in GPU
:param model: The model to lock
:param gpu_load: True if load into gpu
:param size_needed: Size of the model to load
'''
self.gpu_load = gpu_load
self.cache = cache
self.key = key
self.model = model
self.size_needed = size_needed
self.cache_entry = self.cache._cached_models[self.key]
def __enter__(self) -> Any:
@ -222,7 +233,7 @@ class ModelCache(object):
try:
if self.cache.lazy_offloading:
self.cache._offload_unlocked_models()
self.cache._offload_unlocked_models(self.size_needed)
if self.model.device != self.cache.execution_device:
self.cache.logger.debug(f'Moving {self.key} into {self.cache.execution_device}')
@ -337,14 +348,20 @@ class ModelCache(object):
self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}")
def _offload_unlocked_models(self):
for model_key, cache_entry in self._cached_models.items():
def _offload_unlocked_models(self, size_needed: int=0):
reserved = self.max_vram_cache_size * GIG
vram_in_use = torch.cuda.memory_allocated()
self.logger.debug(f'{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB')
for model_key, cache_entry in sorted(self._cached_models.items(), key=lambda x:x[1].size):
if vram_in_use <= reserved:
break
if not cache_entry.locked and cache_entry.loaded:
self.logger.debug(f'Offloading {model_key} from {self.execution_device} into {self.storage_device}')
with VRAMUsage() as mem:
cache_entry.model.to(self.storage_device)
self.logger.debug(f'GPU VRAM freed: {(mem.vram_used/GIG):.2f} GB')
vram_in_use += mem.vram_used # note vram_used is negative
self.logger.debug(f'{(vram_in_use/GIG):.2f}GB VRAM used for models; max allowed={(reserved/GIG):.2f}GB')
def _local_model_hash(self, model_path: Union[str, Path]) -> str:
sha = hashlib.sha256()
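
In short, unlocked models are now offloaded smallest-first until allocated VRAM falls back under the reserve (2.75 GB by default via max_vram_cache_size). A CUDA-free toy run of that policy, with invented sizes and a made-up starting allocation:

GIG = 2 ** 30                      # same value as the GIG constant above
reserved = 2.75 * GIG              # VRAM the cache is allowed to keep resident
vram_in_use = 5.0 * GIG            # pretend torch.cuda.memory_allocated() reported this

# (size_bytes, locked) for three hypothetical cached models, visited smallest first
for size, locked in sorted([(1.5 * GIG, False), (2.0 * GIG, True), (0.5 * GIG, False)]):
    if vram_in_use <= reserved:
        break
    if not locked:
        vram_in_use -= size        # model moved back to the storage (CPU) device

assert vram_in_use == 3.0 * GIG    # the locked 2.0 GB model is never offloaded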

View File

@ -231,6 +231,7 @@ from __future__ import annotations
import os
import hashlib
import textwrap
import yaml
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, List, Tuple, Union, Dict, Set, Callable, types
@ -249,8 +250,8 @@ from .model_cache import ModelCache, ModelLocker
from .models import (
BaseModelType, ModelType, SubModelType,
ModelError, SchedulerPredictionType, MODEL_CLASSES,
ModelConfigBase, ModelNotFoundException,
)
ModelConfigBase, ModelNotFoundException, InvalidModelException,
)
# We are only starting to number the config file with release 3.
# The config file version doesn't have to start at release version, but it will help
@ -274,10 +275,6 @@ class ModelInfo():
def __exit__(self,*args, **kwargs):
self.context.__exit__(*args, **kwargs)
class InvalidModelError(Exception):
"Raised when an invalid model is requested"
pass
class AddModelResult(BaseModel):
name: str = Field(description="The name of the model after installation")
model_type: ModelType = Field(description="The type of model")
@ -314,6 +311,9 @@ class ModelManager(object):
self.config_path = None
if isinstance(config, (str, Path)):
self.config_path = Path(config)
if not self.config_path.exists():
logger.warning(f'The file {self.config_path} was not found. Initializing a new file')
self.initialize_model_config(self.config_path)
config = OmegaConf.load(self.config_path)
elif not isinstance(config, DictConfig):
@ -336,6 +336,7 @@ class ModelManager(object):
self.logger = logger
self.cache = ModelCache(
max_cache_size=max_cache_size,
max_vram_cache_size = self.app_config.max_vram_cache_size,
execution_device = device_type,
precision = precision,
sequential_offload = sequential_offload,
@ -386,6 +387,16 @@ class ModelManager(object):
def _get_model_cache_path(self, model_path):
return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest()
@classmethod
def initialize_model_config(cls, config_path: Path):
"""Create empty config file"""
with open(config_path,'w') as yaml_file:
yaml_file.write(yaml.dump({'__metadata__':
{'version':'3.0.0'}
}
)
)
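
For reference, the stub written here is nothing more than a version stamp; with a recent PyYAML the dump renders in block style:

import yaml

print(yaml.dump({"__metadata__": {"version": "3.0.0"}}))
# __metadata__:
#   version: 3.0.0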
def get_model(
self,
model_name: str,
@ -527,9 +538,9 @@ class ModelManager(object):
model_dict = dict(
**model_config.dict(exclude_defaults=True),
# OpenAPIModelInfoBase
name=cur_model_name,
model_name=cur_model_name,
base_model=cur_base_model,
type=cur_model_type,
model_type=cur_model_type,
)
models.append(model_dict)
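
With this rename, every entry returned by the model listing uses model_name and model_type in place of the old name and type keys, which is what the ModelInstall loop earlier in this diff was updated to read. A representative entry, with illustrative values:

example_entry = {
    "model_name": "stable-diffusion-v1-5",   # previously "name"
    "base_model": "sd-1",                    # a BaseModelType value (illustrative)
    "model_type": "main",                    # previously "type"; a ModelType value (illustrative)
    # ...plus the model's own config fields (path, format, and so on)
}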
@ -802,6 +813,8 @@ class ModelManager(object):
model_config: ModelConfigBase = model_class.probe_config(str(model_path))
self.models[model_key] = model_config
new_models_found = True
except InvalidModelException:
self.logger.warning(f"Not a valid model: {model_path}")
except NotImplementedError as e:
self.logger.warning(e)
@ -853,16 +866,22 @@ class ModelManager(object):
scanned_dirs.add(path)
continue
if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin','pytorch_lora_weights.bin'}]):
new_models_found.update(installer.heuristic_import(path))
scanned_dirs.add(path)
try:
new_models_found.update(installer.heuristic_import(path))
scanned_dirs.add(path)
except ValueError as e:
self.logger.warning(str(e))
for f in files:
path = Path(root) / f
if path in known_paths or path.parent in scanned_dirs:
continue
if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}:
import_result = installer.heuristic_import(path)
new_models_found.update(import_result)
try:
import_result = installer.heuristic_import(path)
new_models_found.update(import_result)
except ValueError as e:
self.logger.warning(str(e))
self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models')
installed.update(new_models_found)

View File

@ -59,7 +59,7 @@ class ModelProbe(object):
elif isinstance(model,(dict,ModelMixin,ConfigMixin)):
return cls.probe(model_path=None, model=model, prediction_type_helper=prediction_type_helper)
else:
raise Exception("model parameter {model} is neither a Path, nor a model")
raise ValueError("model parameter {model} is neither a Path, nor a model")
@classmethod
def probe(cls,
@ -237,7 +237,7 @@ class CheckpointProbeBase(ProbeBase):
elif in_channels == 4:
return ModelVariantType.Normal
else:
raise Exception("Cannot determine variant type")
raise ValueError(f"Cannot determine variant type (in_channels={in_channels}) at {self.checkpoint_path}")
class PipelineCheckpointProbe(CheckpointProbeBase):
def get_base_type(self)->BaseModelType:
@ -248,7 +248,7 @@ class PipelineCheckpointProbe(CheckpointProbeBase):
return BaseModelType.StableDiffusion1
if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
return BaseModelType.StableDiffusion2
raise Exception("Cannot determine base type")
raise ValueError("Cannot determine base type")
def get_scheduler_prediction_type(self)->SchedulerPredictionType:
type = self.get_base_type()
@ -329,7 +329,7 @@ class ControlNetCheckpointProbe(CheckpointProbeBase):
return BaseModelType.StableDiffusion2
elif self.checkpoint_path and self.helper:
return self.helper(self.checkpoint_path)
raise Exception("Unable to determine base type for {self.checkpoint_path}")
raise ValueError("Unable to determine base type for {self.checkpoint_path}")
########################################################
# classes for probing folders
@ -418,7 +418,7 @@ class ControlNetFolderProbe(FolderProbeBase):
def get_base_type(self)->BaseModelType:
config_file = self.folder_path / 'config.json'
if not config_file.exists():
raise Exception(f"Cannot determine base type for {self.folder_path}")
raise ValueError(f"Cannot determine base type for {self.folder_path}")
with open(config_file,'r') as file:
config = json.load(file)
# no obvious way to distinguish between sd2-base and sd2-768
@ -435,7 +435,7 @@ class LoRAFolderProbe(FolderProbeBase):
model_file = base_file
break
if not model_file:
raise Exception('Unknown LoRA format encountered')
raise ValueError('Unknown LoRA format encountered')
return LoRACheckpointProbe(model_file,None).get_base_type()
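
Switching these probes from bare Exception to ValueError is what lets the directory scan shown earlier skip a single undecipherable model and keep going. A toy version of that scan-and-skip pattern; all names here are stand-ins:

def scan(paths, heuristic_import, logger):
    imported = set()
    for path in paths:
        try:
            imported.update(heuristic_import(path))   # probe failures surface as ValueError
        except ValueError as e:
            logger.warning(str(e))                    # warn and continue instead of aborting
    return imported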
############## register probe classes ######

View File

@ -2,7 +2,7 @@ import inspect
from enum import Enum
from pydantic import BaseModel
from typing import Literal, get_origin
from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException
from .base import BaseModelType, ModelType, SubModelType, ModelBase, ModelConfigBase, ModelVariantType, SchedulerPredictionType, ModelError, SilenceWarnings, ModelNotFoundException, InvalidModelException
from .stable_diffusion import StableDiffusion1Model, StableDiffusion2Model
from .vae import VaeModel
from .lora import LoRAModel
@ -37,9 +37,9 @@ MODEL_CONFIGS = list()
OPENAPI_MODEL_CONFIGS = list()
class OpenAPIModelInfoBase(BaseModel):
name: str
model_name: str
base_model: BaseModelType
type: ModelType
model_type: ModelType
for base_model, models in MODEL_CLASSES.items():
@ -56,7 +56,7 @@ for base_model, models in MODEL_CLASSES.items():
api_wrapper = type(openapi_cfg_name, (cfg, OpenAPIModelInfoBase), dict(
__annotations__ = dict(
type=Literal[model_type.value],
model_type=Literal[model_type.value],
),
))
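
For readers unfamiliar with the trick in this loop: calling type() with three arguments builds a class at runtime, and placing __annotations__ in its namespace is what lets pydantic pin the discriminator field to a Literal. A minimal standalone illustration with invented class and field values:

from typing import Literal
from pydantic import BaseModel

class InfoBase(BaseModel):
    model_name: str
    base_model: str
    model_type: str

# Dynamically derive a config class whose model_type must be the literal "lora"
LoraModelInfo = type(
    "LoraModelInfo",
    (InfoBase,),
    {"__annotations__": {"model_type": Literal["lora"]}},
)

LoraModelInfo(model_name="my-lora", base_model="sd-1", model_type="lora")   # validates
# ...while model_type="vae" would raise a ValidationError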

View File

@ -15,6 +15,9 @@ from contextlib import suppress
from pydantic import BaseModel, Field
from typing import List, Dict, Optional, Type, Literal, TypeVar, Generic, Callable, Any, Union
class InvalidModelException(Exception):
pass
class ModelNotFoundException(Exception):
pass

View File

@ -13,6 +13,7 @@ from .base import (
calc_model_size_by_fs,
calc_model_size_by_data,
classproperty,
InvalidModelException,
)
class ControlNetModelFormat(str, Enum):
@ -73,10 +74,18 @@ class ControlNetModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
return ControlNetModelFormat.Diffusers
else:
return ControlNetModelFormat.Checkpoint
if os.path.exists(os.path.join(path, "config.json")):
return ControlNetModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt", "pth"]]):
return ControlNetModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")
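
The same detection pattern repeats for the LoRA, Stable Diffusion, textual inversion and VAE classes below: a missing path raises ModelNotFoundException, a directory is accepted only if its marker file exists, a loose file is accepted only with a known weights extension, and anything else raises InvalidModelException. A condensed sketch of that shared shape, for illustration only (each class keeps its own inline copy and its own marker file):

import os

class ModelNotFoundException(Exception):   # stand-ins for the exceptions added to the base module above
    pass

class InvalidModelException(Exception):
    pass

def detect_format_sketch(path, marker="config.json",
                         exts=(".safetensors", ".ckpt", ".pt", ".pth")):
    if not os.path.exists(path):
        raise ModelNotFoundException()
    if os.path.isdir(path) and os.path.exists(os.path.join(path, marker)):
        return "diffusers"
    if os.path.isfile(path) and path.endswith(exts):
        return "checkpoint"
    raise InvalidModelException(f"Not a valid model: {path}")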
@classmethod
def convert_if_required(

View File

@ -9,6 +9,7 @@ from .base import (
ModelType,
SubModelType,
classproperty,
InvalidModelException,
)
# TODO: naming
from ..lora import LoRAModel as LoRAModelRaw
@ -56,10 +57,18 @@ class LoRAModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
return LoRAModelFormat.Diffusers
else:
return LoRAModelFormat.LyCORIS
if os.path.exists(os.path.join(path, "pytorch_lora_weights.bin")):
return LoRAModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return LoRAModelFormat.LyCORIS
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod
def convert_if_required(

View File

@ -16,6 +16,7 @@ from .base import (
SilenceWarnings,
read_checkpoint_meta,
classproperty,
InvalidModelException,
)
from invokeai.app.services.config import InvokeAIAppConfig
from omegaconf import OmegaConf
@ -98,10 +99,18 @@ class StableDiffusion1Model(DiffusersModel):
@classmethod
def detect_format(cls, model_path: str):
if not os.path.exists(model_path):
raise ModelNotFoundException()
if os.path.isdir(model_path):
return StableDiffusion1ModelFormat.Diffusers
else:
return StableDiffusion1ModelFormat.Checkpoint
if os.path.exists(os.path.join(model_path, "model_index.json")):
return StableDiffusion1ModelFormat.Diffusers
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion1ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@classmethod
def convert_if_required(
@ -200,10 +209,18 @@ class StableDiffusion2Model(DiffusersModel):
@classmethod
def detect_format(cls, model_path: str):
if not os.path.exists(model_path):
raise ModelNotFoundException()
if os.path.isdir(model_path):
return StableDiffusion2ModelFormat.Diffusers
else:
return StableDiffusion2ModelFormat.Checkpoint
if os.path.exists(os.path.join(model_path, "model_index.json")):
return StableDiffusion2ModelFormat.Diffusers
if os.path.isfile(model_path):
if any([model_path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return StableDiffusion2ModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {model_path}")
@classmethod
def convert_if_required(

View File

@ -9,6 +9,7 @@ from .base import (
SubModelType,
classproperty,
ModelNotFoundException,
InvalidModelException,
)
# TODO: naming
from ..lora import TextualInversionModel as TextualInversionModelRaw
@ -59,7 +60,18 @@ class TextualInversionModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
return None
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
if os.path.exists(os.path.join(path, "learned_embeds.bin")):
return None # diffusers-ti
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return None
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod
def convert_if_required(

View File

@ -15,6 +15,7 @@ from .base import (
calc_model_size_by_fs,
calc_model_size_by_data,
classproperty,
InvalidModelException,
)
from invokeai.app.services.config import InvokeAIAppConfig
from diffusers.utils import is_safetensors_available
@ -75,10 +76,18 @@ class VaeModel(ModelBase):
@classmethod
def detect_format(cls, path: str):
if not os.path.exists(path):
raise ModelNotFoundException()
if os.path.isdir(path):
return VaeModelFormat.Diffusers
else:
return VaeModelFormat.Checkpoint
if os.path.exists(os.path.join(path, "config.json")):
return VaeModelFormat.Diffusers
if os.path.isfile(path):
if any([path.endswith(f".{ext}") for ext in ["safetensors", "ckpt", "pt"]]):
return VaeModelFormat.Checkpoint
raise InvalidModelException(f"Not a valid model: {path}")
@classmethod
def convert_if_required(

View File

@ -127,7 +127,7 @@ class AddsMaskGuidance:
def _t_for_field(self, field_name: str, t):
if field_name == "pred_original_sample":
return torch.zeros_like(t, dtype=t.dtype) # it represents t=0
return self.scheduler.timesteps[-1]
return t
def apply_mask(self, latents: torch.Tensor, t) -> torch.Tensor:
@ -631,7 +631,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
control_latent_input = torch.cat([unet_latent_input] * 2)
if cfg_injection: # only applying ControlNet to conditional instead of in unconditioned
encoder_hidden_states = torch.cat([conditioning_data.unconditioned_embeddings])
encoder_hidden_states = conditioning_data.text_embeddings
else:
encoder_hidden_states = torch.cat([conditioning_data.unconditioned_embeddings,
conditioning_data.text_embeddings])

View File

@ -773,7 +773,7 @@ def main():
config.parse_args(invoke_args)
logger = InvokeAILogger().getLogger(config=config)
if not (config.root_dir / config.conf_path.parent).exists():
if not (config.conf_path / 'models.yaml').exists():
logger.info(
"Your InvokeAI root directory is not set up. Calling invokeai-configure."
)

View File

@ -36,6 +36,7 @@ module.exports = {
],
'prettier/prettier': ['error', { endOfLine: 'auto' }],
'@typescript-eslint/ban-ts-comment': 'warn',
'@typescript-eslint/no-explicit-any': 'warn',
'@typescript-eslint/no-empty-interface': [
'error',
{

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -12,7 +12,7 @@
margin: 0;
}
</style>
<script type="module" crossorigin src="./assets/index-581af3d4.js"></script>
<script type="module" crossorigin src="./assets/index-078526aa.js"></script>
</head>
<body dir="ltr">

View File

@ -53,7 +53,7 @@
"linear": "Linear",
"nodes": "Node Editor",
"batch": "Batch Manager",
"modelmanager": "Model Manager",
"modelManager": "Model Manager",
"postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
@ -527,7 +527,9 @@
"showOptionsPanel": "Show Options Panel",
"hidePreview": "Hide Preview",
"showPreview": "Show Preview",
"controlNetControlMode": "Control Mode"
"controlNetControlMode": "Control Mode",
"clipSkip": "Clip Skip",
"aspectRatio": "Ratio"
},
"settings": {
"models": "Models",
@ -551,7 +553,8 @@
"generation": "Generation",
"ui": "User Interface",
"favoriteSchedulers": "Favorite Schedulers",
"favoriteSchedulersPlaceholder": "No schedulers favorited"
"favoriteSchedulersPlaceholder": "No schedulers favorited",
"showAdvancedOptions": "Show Advanced Options"
},
"toast": {
"serverError": "Server Error",
@ -669,6 +672,7 @@
},
"ui": {
"showProgressImages": "Show Progress Images",
"hideProgressImages": "Hide Progress Images"
"hideProgressImages": "Hide Progress Images",
"swapSizes": "Swap Sizes"
}
}

View File

@ -108,6 +108,7 @@
"roarr": "^7.15.0",
"serialize-error": "^11.0.0",
"socket.io-client": "^4.7.0",
"use-debounce": "^9.0.4",
"use-image": "^1.1.1",
"uuid": "^9.0.0",
"zod": "^3.21.4"

View File

@ -53,7 +53,7 @@
"linear": "Linear",
"nodes": "Node Editor",
"batch": "Batch Manager",
"modelmanager": "Model Manager",
"modelManager": "Model Manager",
"postprocessing": "Post Processing",
"nodesDesc": "A node based system for the generation of images is under development currently. Stay tuned for updates about this amazing feature.",
"postProcessing": "Post Processing",
@ -102,7 +102,8 @@
"openInNewTab": "Open in New Tab",
"dontAskMeAgain": "Don't ask me again",
"areYouSure": "Are you sure?",
"imagePrompt": "Image Prompt"
"imagePrompt": "Image Prompt",
"clearNodes": "Are you sure you want to clear all nodes?"
},
"gallery": {
"generations": "Generations",
@ -118,7 +119,7 @@
"pinGallery": "Pin Gallery",
"allImagesLoaded": "All Images Loaded",
"loadMore": "Load More",
"noImagesInGallery": "No Images In Gallery",
"noImagesInGallery": "No Images to Display",
"deleteImage": "Delete Image",
"deleteImageBin": "Deleted images will be sent to your operating system's Bin.",
"deleteImagePermanent": "Deleted images cannot be restored.",
@ -528,7 +529,8 @@
"hidePreview": "Hide Preview",
"showPreview": "Show Preview",
"controlNetControlMode": "Control Mode",
"clipSkip": "Clip Skip"
"clipSkip": "CLIP Skip",
"aspectRatio": "Ratio"
},
"settings": {
"models": "Models",
@ -592,7 +594,12 @@
"metadataLoadFailed": "Failed to load metadata",
"initialImageSet": "Initial Image Set",
"initialImageNotSet": "Initial Image Not Set",
"initialImageNotSetDesc": "Could not load initial image"
"initialImageNotSetDesc": "Could not load initial image",
"nodesSaved": "Nodes Saved",
"nodesLoaded": "Nodes Loaded",
"nodesLoadedFailed": "Failed To Load Nodes",
"nodesCleared": "Nodes Cleared"
},
"tooltip": {
"feature": {
@ -671,6 +678,13 @@
},
"ui": {
"showProgressImages": "Show Progress Images",
"hideProgressImages": "Hide Progress Images"
"hideProgressImages": "Hide Progress Images",
"swapSizes": "Swap Sizes"
},
"nodes": {
"reloadSchema": "Reload Schema",
"saveNodes": "Save Nodes",
"loadNodes": "Load Nodes",
"clearNodes": "Clear Nodes"
}
}

View File

@ -6,9 +6,7 @@ import { PartialAppConfig } from 'app/types/invokeai';
import ImageUploader from 'common/components/ImageUploader';
import GalleryDrawer from 'features/gallery/components/GalleryPanel';
import DeleteImageModal from 'features/imageDeletion/components/DeleteImageModal';
import Lightbox from 'features/lightbox/components/Lightbox';
import SiteHeader from 'features/system/components/SiteHeader';
import { useFeatureStatus } from 'features/system/hooks/useFeatureStatus';
import { configChanged } from 'features/system/store/configSlice';
import { languageSelector } from 'features/system/store/systemSelectors';
import FloatingGalleryButton from 'features/ui/components/FloatingGalleryButton';
@ -34,8 +32,6 @@ const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
const log = useLogger();
const isLightboxEnabled = useFeatureStatus('lightbox').isFeatureEnabled;
const dispatch = useAppDispatch();
useEffect(() => {
@ -54,7 +50,6 @@ const App = ({ config = DEFAULT_CONFIG, headerComponent }: Props) => {
return (
<>
<Grid w="100vw" h="100vh" position="relative" overflow="hidden">
{isLightboxEnabled && <Lightbox />}
<ImageUploader>
<Grid
sx={{

View File

@ -1,8 +1,4 @@
import { Box, ChakraProps, Flex, Heading, Image } from '@chakra-ui/react';
import { createSelector } from '@reduxjs/toolkit';
import { stateSelector } from 'app/store/store';
import { useAppSelector } from 'app/store/storeHooks';
import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
import { memo } from 'react';
import { TypesafeDraggableData } from './typesafeDnd';
@ -32,24 +28,7 @@ const STYLES: ChakraProps['sx'] = {
},
};
const selector = createSelector(
stateSelector,
(state) => {
const gallerySelectionCount = state.gallery.selection.length;
const batchSelectionCount = state.batch.selection.length;
return {
gallerySelectionCount,
batchSelectionCount,
};
},
defaultSelectorOptions
);
const DragPreview = (props: OverlayDragImageProps) => {
const { gallerySelectionCount, batchSelectionCount } =
useAppSelector(selector);
if (!props.dragData) {
return;
}
@ -82,7 +61,7 @@ const DragPreview = (props: OverlayDragImageProps) => {
);
}
if (props.dragData.payloadType === 'BATCH_SELECTION') {
if (props.dragData.payloadType === 'IMAGE_NAMES') {
return (
<Flex
sx={{
@ -95,26 +74,7 @@ const DragPreview = (props: OverlayDragImageProps) => {
...STYLES,
}}
>
<Heading>{batchSelectionCount}</Heading>
<Heading size="sm">Images</Heading>
</Flex>
);
}
if (props.dragData.payloadType === 'GALLERY_SELECTION') {
return (
<Flex
sx={{
cursor: 'none',
userSelect: 'none',
position: 'relative',
alignItems: 'center',
justifyContent: 'center',
flexDir: 'column',
...STYLES,
}}
>
<Heading>{gallerySelectionCount}</Heading>
<Heading>{props.dragData.payload.image_names.length}</Heading>
<Heading size="sm">Images</Heading>
</Flex>
);

View File

@ -6,18 +6,18 @@ import {
useSensor,
useSensors,
} from '@dnd-kit/core';
import { snapCenterToCursor } from '@dnd-kit/modifiers';
import { dndDropped } from 'app/store/middleware/listenerMiddleware/listeners/imageDropped';
import { useAppDispatch } from 'app/store/storeHooks';
import { AnimatePresence, motion } from 'framer-motion';
import { PropsWithChildren, memo, useCallback, useState } from 'react';
import DragPreview from './DragPreview';
import { snapCenterToCursor } from '@dnd-kit/modifiers';
import { AnimatePresence, motion } from 'framer-motion';
import {
DndContext,
DragEndEvent,
DragStartEvent,
TypesafeDraggableData,
} from './typesafeDnd';
import { useAppDispatch } from 'app/store/storeHooks';
import { imageDropped } from 'app/store/middleware/listenerMiddleware/listeners/imageDropped';
type ImageDndContextProps = PropsWithChildren;
@ -42,18 +42,18 @@ const ImageDndContext = (props: ImageDndContextProps) => {
if (!activeData || !overData) {
return;
}
dispatch(imageDropped({ overData, activeData }));
dispatch(dndDropped({ overData, activeData }));
setActiveDragData(null);
},
[dispatch]
);
const mouseSensor = useSensor(MouseSensor, {
activationConstraint: { delay: 150, tolerance: 5 },
activationConstraint: { distance: 10 },
});
const touchSensor = useSensor(TouchSensor, {
activationConstraint: { delay: 150, tolerance: 5 },
activationConstraint: { distance: 10 },
});
// TODO: Use KeyboardSensor - needs composition of multiple collisionDetection algos

View File

@ -77,18 +77,14 @@ export type ImageDraggableData = BaseDragData & {
payload: { imageDTO: ImageDTO };
};
export type GallerySelectionDraggableData = BaseDragData & {
payloadType: 'GALLERY_SELECTION';
};
export type BatchSelectionDraggableData = BaseDragData & {
payloadType: 'BATCH_SELECTION';
export type ImageNamesDraggableData = BaseDragData & {
payloadType: 'IMAGE_NAMES';
payload: { image_names: string[] };
};
export type TypesafeDraggableData =
| ImageDraggableData
| GallerySelectionDraggableData
| BatchSelectionDraggableData;
| ImageNamesDraggableData;
interface UseDroppableTypesafeArguments
extends Omit<UseDroppableArguments, 'data'> {
@ -159,13 +155,11 @@ export const isValidDrop = (
case 'SET_NODES_IMAGE':
return payloadType === 'IMAGE_DTO';
case 'SET_MULTI_NODES_IMAGE':
return payloadType === 'IMAGE_DTO' || 'GALLERY_SELECTION';
return payloadType === 'IMAGE_DTO' || 'IMAGE_NAMES';
case 'ADD_TO_BATCH':
return payloadType === 'IMAGE_DTO' || 'GALLERY_SELECTION';
return payloadType === 'IMAGE_DTO' || 'IMAGE_NAMES';
case 'MOVE_BOARD':
return (
payloadType === 'IMAGE_DTO' || 'GALLERY_SELECTION' || 'BATCH_SELECTION'
);
return payloadType === 'IMAGE_DTO' || 'IMAGE_NAMES';
default:
return false;
}

View File

@ -1,6 +1,7 @@
import { SchedulerParam } from 'features/parameters/store/parameterZodSchemas';
// zod needs the array to be `as const` to infer the type correctly
import { SchedulerParam } from 'features/parameters/types/parameterSchemas';
// this is the source of the `SchedulerParam` type, which is generated by zod
export const SCHEDULER_NAMES_AS_CONST = [
'euler',

View File

@ -1,67 +0,0 @@
// import { createAction } from '@reduxjs/toolkit';
// import * as InvokeAI from 'app/types/invokeai';
// import { GalleryCategory } from 'features/gallery/store/gallerySlice';
// import { InvokeTabName } from 'features/ui/store/tabMap';
// /**
// * We can't use redux-toolkit's createSlice() to make these actions,
// * because they have no associated reducer. They only exist to dispatch
// * requests to the server via socketio. These actions will be handled
// * by the middleware.
// */
// export const generateImage = createAction<InvokeTabName>(
// 'socketio/generateImage'
// );
// export const runESRGAN = createAction<InvokeAI._Image>('socketio/runESRGAN');
// export const runFacetool = createAction<InvokeAI._Image>(
// 'socketio/runFacetool'
// );
// export const deleteImage = createAction<InvokeAI._Image>(
// 'socketio/deleteImage'
// );
// export const requestImages = createAction<GalleryCategory>(
// 'socketio/requestImages'
// );
// export const requestNewImages = createAction<GalleryCategory>(
// 'socketio/requestNewImages'
// );
// export const cancelProcessing = createAction<undefined>(
// 'socketio/cancelProcessing'
// );
// export const requestSystemConfig = createAction<undefined>(
// 'socketio/requestSystemConfig'
// );
// export const searchForModels = createAction<string>('socketio/searchForModels');
// export const addNewModel = createAction<
// InvokeAI.InvokeModelConfigProps | InvokeAI.InvokeDiffusersModelConfigProps
// >('socketio/addNewModel');
// export const deleteModel = createAction<string>('socketio/deleteModel');
// export const convertToDiffusers =
// createAction<InvokeAI.InvokeModelConversionProps>(
// 'socketio/convertToDiffusers'
// );
// export const mergeDiffusersModels =
// createAction<InvokeAI.InvokeModelMergingProps>(
// 'socketio/mergeDiffusersModels'
// );
// export const requestModelChange = createAction<string>(
// 'socketio/requestModelChange'
// );
// export const saveStagingAreaImageToGallery = createAction<string>(
// 'socketio/saveStagingAreaImageToGallery'
// );
// export const emptyTempFolder = createAction<undefined>(
// 'socketio/requestEmptyTempFolder'
// );
export default {};

View File

@ -1,209 +0,0 @@
import { AnyAction, Dispatch, MiddlewareAPI } from '@reduxjs/toolkit';
import * as InvokeAI from 'app/types/invokeai';
import type { RootState } from 'app/store/store';
import {
frontendToBackendParameters,
FrontendToBackendParametersConfig,
} from 'common/util/parameterTranslation';
import dateFormat from 'dateformat';
import {
GalleryCategory,
GalleryState,
removeImage,
} from 'features/gallery/store/gallerySlice';
import {
generationRequested,
modelChangeRequested,
modelConvertRequested,
modelMergingRequested,
setIsProcessing,
} from 'features/system/store/systemSlice';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { Socket } from 'socket.io-client';
/**
* Returns an object containing all functions which use `socketio.emit()`.
* i.e. those which make server requests.
*/
const makeSocketIOEmitters = (
store: MiddlewareAPI<Dispatch<AnyAction>, RootState>,
socketio: Socket
) => {
// We need to dispatch actions to redux and get pieces of state from the store.
const { dispatch, getState } = store;
return {
emitGenerateImage: (generationMode: InvokeTabName) => {
dispatch(setIsProcessing(true));
const state: RootState = getState();
const {
generation: generationState,
postprocessing: postprocessingState,
system: systemState,
canvas: canvasState,
} = state;
const frontendToBackendParametersConfig: FrontendToBackendParametersConfig =
{
generationMode,
generationState,
postprocessingState,
canvasState,
systemState,
};
dispatch(generationRequested());
const { generationParameters, esrganParameters, facetoolParameters } =
frontendToBackendParameters(frontendToBackendParametersConfig);
socketio.emit(
'generateImage',
generationParameters,
esrganParameters,
facetoolParameters
);
// we need to truncate the init_mask base64 else it takes up the whole log
// TODO: handle maintaining masks for reproducibility in future
if (generationParameters.init_mask) {
generationParameters.init_mask = generationParameters.init_mask
.substr(0, 64)
.concat('...');
}
if (generationParameters.init_img) {
generationParameters.init_img = generationParameters.init_img
.substr(0, 64)
.concat('...');
}
dispatch(
addLogEntry({
timestamp: dateFormat(new Date(), 'isoDateTime'),
message: `Image generation requested: ${JSON.stringify({
...generationParameters,
...esrganParameters,
...facetoolParameters,
})}`,
})
);
},
emitRunESRGAN: (imageToProcess: InvokeAI._Image) => {
dispatch(setIsProcessing(true));
const {
postprocessing: {
upscalingLevel,
upscalingDenoising,
upscalingStrength,
},
} = getState();
const esrganParameters = {
upscale: [upscalingLevel, upscalingDenoising, upscalingStrength],
};
socketio.emit('runPostprocessing', imageToProcess, {
type: 'esrgan',
...esrganParameters,
});
dispatch(
addLogEntry({
timestamp: dateFormat(new Date(), 'isoDateTime'),
message: `ESRGAN upscale requested: ${JSON.stringify({
file: imageToProcess.url,
...esrganParameters,
})}`,
})
);
},
emitRunFacetool: (imageToProcess: InvokeAI._Image) => {
dispatch(setIsProcessing(true));
const {
postprocessing: { facetoolType, facetoolStrength, codeformerFidelity },
} = getState();
const facetoolParameters: Record<string, unknown> = {
facetool_strength: facetoolStrength,
};
if (facetoolType === 'codeformer') {
facetoolParameters.codeformer_fidelity = codeformerFidelity;
}
socketio.emit('runPostprocessing', imageToProcess, {
type: facetoolType,
...facetoolParameters,
});
dispatch(
addLogEntry({
timestamp: dateFormat(new Date(), 'isoDateTime'),
message: `Face restoration (${facetoolType}) requested: ${JSON.stringify(
{
file: imageToProcess.url,
...facetoolParameters,
}
)}`,
})
);
},
emitDeleteImage: (imageToDelete: InvokeAI._Image) => {
const { url, uuid, category, thumbnail } = imageToDelete;
dispatch(removeImage(imageToDelete));
socketio.emit('deleteImage', url, thumbnail, uuid, category);
},
emitRequestImages: (category: GalleryCategory) => {
const gallery: GalleryState = getState().gallery;
const { earliest_mtime } = gallery.categories[category];
socketio.emit('requestImages', category, earliest_mtime);
},
emitRequestNewImages: (category: GalleryCategory) => {
const gallery: GalleryState = getState().gallery;
const { latest_mtime } = gallery.categories[category];
socketio.emit('requestLatestImages', category, latest_mtime);
},
emitCancelProcessing: () => {
socketio.emit('cancel');
},
emitRequestSystemConfig: () => {
socketio.emit('requestSystemConfig');
},
emitSearchForModels: (modelFolder: string) => {
socketio.emit('searchForModels', modelFolder);
},
emitAddNewModel: (modelConfig: InvokeAI.InvokeModelConfigProps) => {
socketio.emit('addNewModel', modelConfig);
},
emitDeleteModel: (modelName: string) => {
socketio.emit('deleteModel', modelName);
},
emitConvertToDiffusers: (
modelToConvert: InvokeAI.InvokeModelConversionProps
) => {
dispatch(modelConvertRequested());
socketio.emit('convertToDiffusers', modelToConvert);
},
emitMergeDiffusersModels: (
modelMergeInfo: InvokeAI.InvokeModelMergingProps
) => {
dispatch(modelMergingRequested());
socketio.emit('mergeDiffusersModels', modelMergeInfo);
},
emitRequestModelChange: (modelName: string) => {
dispatch(modelChangeRequested());
socketio.emit('requestModelChange', modelName);
},
emitSaveStagingAreaImageToGallery: (url: string) => {
socketio.emit('requestSaveStagingAreaImageToGallery', url);
},
emitRequestEmptyTempFolder: () => {
socketio.emit('requestEmptyTempFolder');
},
};
};
export default makeSocketIOEmitters;
export default {};

View File

@ -1,502 +0,0 @@
// import { AnyAction, Dispatch, MiddlewareAPI } from '@reduxjs/toolkit';
// import dateFormat from 'dateformat';
// import i18n from 'i18n';
// import { v4 as uuidv4 } from 'uuid';
// import * as InvokeAI from 'app/types/invokeai';
// import {
// addToast,
// errorOccurred,
// processingCanceled,
// setCurrentStatus,
// setFoundModels,
// setIsCancelable,
// setIsConnected,
// setIsProcessing,
// setModelList,
// setSearchFolder,
// setSystemConfig,
// setSystemStatus,
// } from 'features/system/store/systemSlice';
// import {
// addGalleryImages,
// addImage,
// clearIntermediateImage,
// GalleryState,
// removeImage,
// setIntermediateImage,
// } from 'features/gallery/store/gallerySlice';
// import type { RootState } from 'app/store/store';
// import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
// import {
// clearInitialImage,
// initialImageSelected,
// setInfillMethod,
// // setInitialImage,
// setMaskPath,
// } from 'features/parameters/store/generationSlice';
// import { tabMap } from 'features/ui/store/tabMap';
// import {
// requestImages,
// requestNewImages,
// requestSystemConfig,
// } from './actions';
// /**
// * Returns an object containing listener callbacks for socketio events.
// * TODO: This file is large, but simple. Should it be split up further?
// */
// const makeSocketIOListeners = (
// store: MiddlewareAPI<Dispatch<AnyAction>, RootState>
// ) => {
// const { dispatch, getState } = store;
// return {
// /**
// * Callback to run when we receive a 'connect' event.
// */
// onConnect: () => {
// try {
// dispatch(setIsConnected(true));
// dispatch(setCurrentStatus(i18n.t('common.statusConnected')));
// dispatch(requestSystemConfig());
// const gallery: GalleryState = getState().gallery;
// if (gallery.categories.result.latest_mtime) {
// dispatch(requestNewImages('result'));
// } else {
// dispatch(requestImages('result'));
// }
// if (gallery.categories.user.latest_mtime) {
// dispatch(requestNewImages('user'));
// } else {
// dispatch(requestImages('user'));
// }
// } catch (e) {
// console.error(e);
// }
// },
// /**
// * Callback to run when we receive a 'disconnect' event.
// */
// onDisconnect: () => {
// try {
// dispatch(setIsConnected(false));
// dispatch(setCurrentStatus(i18n.t('common.statusDisconnected')));
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Disconnected from server`,
// level: 'warning',
// })
// );
// } catch (e) {
// console.error(e);
// }
// },
// /**
// * Callback to run when we receive a 'generationResult' event.
// */
// onGenerationResult: (data: InvokeAI.ImageResultResponse) => {
// try {
// const state = getState();
// const { activeTab } = state.ui;
// const { shouldLoopback } = state.postprocessing;
// const { boundingBox: _, generationMode, ...rest } = data;
// const newImage = {
// uuid: uuidv4(),
// ...rest,
// };
// if (['txt2img', 'img2img'].includes(generationMode)) {
// dispatch(
// addImage({
// category: 'result',
// image: { ...newImage, category: 'result' },
// })
// );
// }
// if (generationMode === 'unifiedCanvas' && data.boundingBox) {
// const { boundingBox } = data;
// dispatch(
// addImageToStagingArea({
// image: { ...newImage, category: 'temp' },
// boundingBox,
// })
// );
// if (state.canvas.shouldAutoSave) {
// dispatch(
// addImage({
// image: { ...newImage, category: 'result' },
// category: 'result',
// })
// );
// }
// }
// // TODO: fix
// // if (shouldLoopback) {
// // const activeTabName = tabMap[activeTab];
// // switch (activeTabName) {
// // case 'img2img': {
// // dispatch(initialImageSelected(newImage.uuid));
// // // dispatch(setInitialImage(newImage));
// // break;
// // }
// // }
// // }
// dispatch(clearIntermediateImage());
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Image generated: ${data.url}`,
// })
// );
// } catch (e) {
// console.error(e);
// }
// },
// /**
// * Callback to run when we receive a 'intermediateResult' event.
// */
// onIntermediateResult: (data: InvokeAI.ImageResultResponse) => {
// try {
// dispatch(
// setIntermediateImage({
// uuid: uuidv4(),
// ...data,
// category: 'result',
// })
// );
// if (!data.isBase64) {
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Intermediate image generated: ${data.url}`,
// })
// );
// }
// } catch (e) {
// console.error(e);
// }
// },
// /**
// * Callback to run when we receive an 'esrganResult' event.
// */
// onPostprocessingResult: (data: InvokeAI.ImageResultResponse) => {
// try {
// dispatch(
// addImage({
// category: 'result',
// image: {
// uuid: uuidv4(),
// ...data,
// category: 'result',
// },
// })
// );
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Postprocessed: ${data.url}`,
// })
// );
// } catch (e) {
// console.error(e);
// }
// },
// /**
// * Callback to run when we receive a 'progressUpdate' event.
// * TODO: Add additional progress phases
// */
// onProgressUpdate: (data: InvokeAI.SystemStatus) => {
// try {
// dispatch(setIsProcessing(true));
// dispatch(setSystemStatus(data));
// } catch (e) {
// console.error(e);
// }
// },
// /**
// * Callback to run when we receive a 'progressUpdate' event.
// */
// onError: (data: InvokeAI.ErrorResponse) => {
// const { message, additionalData } = data;
// if (additionalData) {
// // TODO: handle more data than short message
// }
// try {
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Server error: ${message}`,
// level: 'error',
// })
// );
// dispatch(errorOccurred());
// dispatch(clearIntermediateImage());
// } catch (e) {
// console.error(e);
// }
// },
// /**
// * Callback to run when we receive a 'galleryImages' event.
// */
// onGalleryImages: (data: InvokeAI.GalleryImagesResponse) => {
// const { images, areMoreImagesAvailable, category } = data;
// /**
// * the logic here ideally would be in the reducer but we have a side effect:
// * generating a uuid. so the logic needs to be here, outside redux.
// */
// // Generate a UUID for each image
// const preparedImages = images.map((image): InvokeAI._Image => {
// return {
// uuid: uuidv4(),
// ...image,
// };
// });
// dispatch(
// addGalleryImages({
// images: preparedImages,
// areMoreImagesAvailable,
// category,
// })
// );
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Loaded ${images.length} images`,
// })
// );
// },
// /**
// * Callback to run when we receive a 'processingCanceled' event.
// */
// onProcessingCanceled: () => {
// dispatch(processingCanceled());
// const { intermediateImage } = getState().gallery;
// if (intermediateImage) {
// if (!intermediateImage.isBase64) {
// dispatch(
// addImage({
// category: 'result',
// image: intermediateImage,
// })
// );
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Intermediate image saved: ${intermediateImage.url}`,
// })
// );
// }
// dispatch(clearIntermediateImage());
// }
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Processing canceled`,
// level: 'warning',
// })
// );
// },
// /**
// * Callback to run when we receive a 'imageDeleted' event.
// */
// onImageDeleted: (data: InvokeAI.ImageDeletedResponse) => {
// const { url } = data;
// // remove image from gallery
// dispatch(removeImage(data));
// // remove references to image in options
// const {
// generation: { initialImage, maskPath },
// } = getState();
// if (
// initialImage === url ||
// (initialImage as InvokeAI._Image)?.url === url
// ) {
// dispatch(clearInitialImage());
// }
// if (maskPath === url) {
// dispatch(setMaskPath(''));
// }
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Image deleted: ${url}`,
// })
// );
// },
// onSystemConfig: (data: InvokeAI.SystemConfig) => {
// dispatch(setSystemConfig(data));
// if (!data.infill_methods.includes('patchmatch')) {
// dispatch(setInfillMethod(data.infill_methods[0]));
// }
// },
// onFoundModels: (data: InvokeAI.FoundModelResponse) => {
// const { search_folder, found_models } = data;
// dispatch(setSearchFolder(search_folder));
// dispatch(setFoundModels(found_models));
// },
// onNewModelAdded: (data: InvokeAI.ModelAddedResponse) => {
// const { new_model_name, model_list, update } = data;
// dispatch(setModelList(model_list));
// dispatch(setIsProcessing(false));
// dispatch(setCurrentStatus(i18n.t('modelManager.modelAdded')));
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Model Added: ${new_model_name}`,
// level: 'info',
// })
// );
// dispatch(
// addToast({
// title: !update
// ? `${i18n.t('modelManager.modelAdded')}: ${new_model_name}`
// : `${i18n.t('modelManager.modelUpdated')}: ${new_model_name}`,
// status: 'success',
// duration: 2500,
// isClosable: true,
// })
// );
// },
// onModelDeleted: (data: InvokeAI.ModelDeletedResponse) => {
// const { deleted_model_name, model_list } = data;
// dispatch(setModelList(model_list));
// dispatch(setIsProcessing(false));
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `${i18n.t(
// 'modelManager.modelAdded'
// )}: ${deleted_model_name}`,
// level: 'info',
// })
// );
// dispatch(
// addToast({
// title: `${i18n.t(
// 'modelManager.modelEntryDeleted'
// )}: ${deleted_model_name}`,
// status: 'success',
// duration: 2500,
// isClosable: true,
// })
// );
// },
// onModelConverted: (data: InvokeAI.ModelConvertedResponse) => {
// const { converted_model_name, model_list } = data;
// dispatch(setModelList(model_list));
// dispatch(setCurrentStatus(i18n.t('common.statusModelConverted')));
// dispatch(setIsProcessing(false));
// dispatch(setIsCancelable(true));
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Model converted: ${converted_model_name}`,
// level: 'info',
// })
// );
// dispatch(
// addToast({
// title: `${i18n.t(
// 'modelManager.modelConverted'
// )}: ${converted_model_name}`,
// status: 'success',
// duration: 2500,
// isClosable: true,
// })
// );
// },
// onModelsMerged: (data: InvokeAI.ModelsMergedResponse) => {
// const { merged_models, merged_model_name, model_list } = data;
// dispatch(setModelList(model_list));
// dispatch(setCurrentStatus(i18n.t('common.statusMergedModels')));
// dispatch(setIsProcessing(false));
// dispatch(setIsCancelable(true));
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Models merged: ${merged_models}`,
// level: 'info',
// })
// );
// dispatch(
// addToast({
// title: `${i18n.t('modelManager.modelsMerged')}: ${merged_model_name}`,
// status: 'success',
// duration: 2500,
// isClosable: true,
// })
// );
// },
// onModelChanged: (data: InvokeAI.ModelChangeResponse) => {
// const { model_name, model_list } = data;
// dispatch(setModelList(model_list));
// dispatch(setCurrentStatus(i18n.t('common.statusModelChanged')));
// dispatch(setIsProcessing(false));
// dispatch(setIsCancelable(true));
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Model changed: ${model_name}`,
// level: 'info',
// })
// );
// },
// onModelChangeFailed: (data: InvokeAI.ModelChangeResponse) => {
// const { model_name, model_list } = data;
// dispatch(setModelList(model_list));
// dispatch(setIsProcessing(false));
// dispatch(setIsCancelable(true));
// dispatch(errorOccurred());
// dispatch(
// addLogEntry({
// timestamp: dateFormat(new Date(), 'isoDateTime'),
// message: `Model change failed: ${model_name}`,
// level: 'error',
// })
// );
// },
// onTempFolderEmptied: () => {
// dispatch(
// addToast({
// title: i18n.t('toast.tempFoldersEmptied'),
// status: 'success',
// duration: 2500,
// isClosable: true,
// })
// );
// },
// };
// };
// export default makeSocketIOListeners;
export default {};

View File

@ -1,248 +0,0 @@
// import { Middleware } from '@reduxjs/toolkit';
// import { io } from 'socket.io-client';
// import makeSocketIOEmitters from './emitters';
// import makeSocketIOListeners from './listeners';
// import * as InvokeAI from 'app/types/invokeai';
// /**
// * Creates a socketio middleware to handle communication with server.
// *
// * Special `socketio/actionName` actions are created in actions.ts and
// * exported for use by the application, which treats them like any old
// * action, using `dispatch` to dispatch them.
// *
// * These actions are intercepted here, where `socketio.emit()` calls are
// * made on their behalf - see `emitters.ts`. The emitter functions
// * are the outbound communication to the server.
// *
// * Listeners are also established here - see `listeners.ts`. The listener
// * functions receive communication from the server and usually dispatch
// * some new action to handle whatever data was sent from the server.
// */
// export const socketioMiddleware = () => {
// const { origin } = new URL(window.location.href);
// const socketio = io(origin, {
// timeout: 60000,
// path: `${window.location.pathname}socket.io`,
// });
// socketio.disconnect();
// let areListenersSet = false;
// const middleware: Middleware = (store) => (next) => (action) => {
// const {
// onConnect,
// onDisconnect,
// onError,
// onPostprocessingResult,
// onGenerationResult,
// onIntermediateResult,
// onProgressUpdate,
// onGalleryImages,
// onProcessingCanceled,
// onImageDeleted,
// onSystemConfig,
// onModelChanged,
// onFoundModels,
// onNewModelAdded,
// onModelDeleted,
// onModelConverted,
// onModelsMerged,
// onModelChangeFailed,
// onTempFolderEmptied,
// } = makeSocketIOListeners(store);
// const {
// emitGenerateImage,
// emitRunESRGAN,
// emitRunFacetool,
// emitDeleteImage,
// emitRequestImages,
// emitRequestNewImages,
// emitCancelProcessing,
// emitRequestSystemConfig,
// emitSearchForModels,
// emitAddNewModel,
// emitDeleteModel,
// emitConvertToDiffusers,
// emitMergeDiffusersModels,
// emitRequestModelChange,
// emitSaveStagingAreaImageToGallery,
// emitRequestEmptyTempFolder,
// } = makeSocketIOEmitters(store, socketio);
// /**
// * If this is the first time the middleware has been called (e.g. during store setup),
// * initialize all our socket.io listeners.
// */
// if (!areListenersSet) {
// socketio.on('connect', () => onConnect());
// socketio.on('disconnect', () => onDisconnect());
// socketio.on('error', (data: InvokeAI.ErrorResponse) => onError(data));
// socketio.on('generationResult', (data: InvokeAI.ImageResultResponse) =>
// onGenerationResult(data)
// );
// socketio.on(
// 'postprocessingResult',
// (data: InvokeAI.ImageResultResponse) => onPostprocessingResult(data)
// );
// socketio.on('intermediateResult', (data: InvokeAI.ImageResultResponse) =>
// onIntermediateResult(data)
// );
// socketio.on('progressUpdate', (data: InvokeAI.SystemStatus) =>
// onProgressUpdate(data)
// );
// socketio.on('galleryImages', (data: InvokeAI.GalleryImagesResponse) =>
// onGalleryImages(data)
// );
// socketio.on('processingCanceled', () => {
// onProcessingCanceled();
// });
// socketio.on('imageDeleted', (data: InvokeAI.ImageDeletedResponse) => {
// onImageDeleted(data);
// });
// socketio.on('systemConfig', (data: InvokeAI.SystemConfig) => {
// onSystemConfig(data);
// });
// socketio.on('foundModels', (data: InvokeAI.FoundModelResponse) => {
// onFoundModels(data);
// });
// socketio.on('newModelAdded', (data: InvokeAI.ModelAddedResponse) => {
// onNewModelAdded(data);
// });
// socketio.on('modelDeleted', (data: InvokeAI.ModelDeletedResponse) => {
// onModelDeleted(data);
// });
// socketio.on('modelConverted', (data: InvokeAI.ModelConvertedResponse) => {
// onModelConverted(data);
// });
// socketio.on('modelsMerged', (data: InvokeAI.ModelsMergedResponse) => {
// onModelsMerged(data);
// });
// socketio.on('modelChanged', (data: InvokeAI.ModelChangeResponse) => {
// onModelChanged(data);
// });
// socketio.on('modelChangeFailed', (data: InvokeAI.ModelChangeResponse) => {
// onModelChangeFailed(data);
// });
// socketio.on('tempFolderEmptied', () => {
// onTempFolderEmptied();
// });
// areListenersSet = true;
// }
// /**
// * Handle redux actions caught by middleware.
// */
// switch (action.type) {
// case 'socketio/generateImage': {
// emitGenerateImage(action.payload);
// break;
// }
// case 'socketio/runESRGAN': {
// emitRunESRGAN(action.payload);
// break;
// }
// case 'socketio/runFacetool': {
// emitRunFacetool(action.payload);
// break;
// }
// case 'socketio/deleteImage': {
// emitDeleteImage(action.payload);
// break;
// }
// case 'socketio/requestImages': {
// emitRequestImages(action.payload);
// break;
// }
// case 'socketio/requestNewImages': {
// emitRequestNewImages(action.payload);
// break;
// }
// case 'socketio/cancelProcessing': {
// emitCancelProcessing();
// break;
// }
// case 'socketio/requestSystemConfig': {
// emitRequestSystemConfig();
// break;
// }
// case 'socketio/searchForModels': {
// emitSearchForModels(action.payload);
// break;
// }
// case 'socketio/addNewModel': {
// emitAddNewModel(action.payload);
// break;
// }
// case 'socketio/deleteModel': {
// emitDeleteModel(action.payload);
// break;
// }
// case 'socketio/convertToDiffusers': {
// emitConvertToDiffusers(action.payload);
// break;
// }
// case 'socketio/mergeDiffusersModels': {
// emitMergeDiffusersModels(action.payload);
// break;
// }
// case 'socketio/requestModelChange': {
// emitRequestModelChange(action.payload);
// break;
// }
// case 'socketio/saveStagingAreaImageToGallery': {
// emitSaveStagingAreaImageToGallery(action.payload);
// break;
// }
// case 'socketio/requestEmptyTempFolder': {
// emitRequestEmptyTempFolder();
// break;
// }
// }
// next(action);
// };
// return middleware;
// };
export default {};
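
For reference, the docblock in the retired middleware above describes a simple contract: namespaced `socketio/*` actions are dispatched like ordinary Redux actions and are translated into `socket.emit()` calls, while listeners registered on the socket dispatch new actions back into the store. The sketch below illustrates only the emit half of that pattern; `makeExampleSocketMiddleware` and the action-type-to-event mapping are hypothetical and not part of the codebase, which now handles socket traffic through the listener middleware shown later in this diff.

import type { Middleware } from '@reduxjs/toolkit';
import { io } from 'socket.io-client';

// Minimal sketch of the retired pattern: any action whose type starts with
// `socketio/` is forwarded to the server as a socket.io event; everything
// else passes straight through to the next middleware.
export const makeExampleSocketMiddleware = (): Middleware => {
  const socket = io(window.location.origin, {
    timeout: 60000,
    path: `${window.location.pathname}socket.io`,
  });

  return () => (next) => (action) => {
    if (typeof action.type === 'string' && action.type.startsWith('socketio/')) {
      // e.g. 'socketio/generateImage' -> emit('generateImage', <payload>)
      socket.emit(action.type.replace('socketio/', ''), action.payload);
    }
    return next(action);
  };
};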

View File

@ -1,7 +1,6 @@
import { canvasPersistDenylist } from 'features/canvas/store/canvasPersistDenylist';
import { controlNetDenylist } from 'features/controlNet/store/controlNetDenylist';
import { galleryPersistDenylist } from 'features/gallery/store/galleryPersistDenylist';
import { lightboxPersistDenylist } from 'features/lightbox/store/lightboxPersistDenylist';
import { nodesPersistDenylist } from 'features/nodes/store/nodesPersistDenylist';
import { generationPersistDenylist } from 'features/parameters/store/generationPersistDenylist';
import { postprocessingPersistDenylist } from 'features/parameters/store/postprocessingPersistDenylist';
@ -16,7 +15,6 @@ const serializationDenylist: {
canvas: canvasPersistDenylist,
gallery: galleryPersistDenylist,
generation: generationPersistDenylist,
lightbox: lightboxPersistDenylist,
nodes: nodesPersistDenylist,
postprocessing: postprocessingPersistDenylist,
system: systemPersistDenylist,

View File

@ -1,7 +1,6 @@
import { initialCanvasState } from 'features/canvas/store/canvasSlice';
import { initialControlNetState } from 'features/controlNet/store/controlNetSlice';
import { initialGalleryState } from 'features/gallery/store/gallerySlice';
import { initialLightboxState } from 'features/lightbox/store/lightboxSlice';
import { initialNodesState } from 'features/nodes/store/nodesSlice';
import { initialGenerationState } from 'features/parameters/store/generationSlice';
import { initialPostprocessingState } from 'features/parameters/store/postprocessingSlice';
@ -18,7 +17,6 @@ const initialStates: {
canvas: initialCanvasState,
gallery: initialGalleryState,
generation: initialGenerationState,
lightbox: initialLightboxState,
nodes: initialNodesState,
postprocessing: initialPostprocessingState,
system: initialSystemState,

View File

@ -1,4 +1,8 @@
/**
* This is a list of actions that should be excluded in the Redux DevTools.
*/
export const actionsDenylist = [
// very spammy canvas actions
'canvas/setCursorPosition',
'canvas/setStageCoordinates',
'canvas/setStageScale',
@ -7,7 +11,11 @@ export const actionsDenylist = [
'canvas/setBoundingBoxDimensions',
'canvas/setIsDrawing',
'canvas/addPointToCurrentLine',
// bazillions during generation
'socket/socketGeneratorProgress',
'socket/appSocketGeneratorProgress',
// every time user presses shift
'hotkeys/shiftKeyPressed',
// this happens after every state change
'@@REMEMBER_PERSISTED',
];

View File

@ -8,6 +8,7 @@ import {
import type { AppDispatch, RootState } from '../../store';
import { addCommitStagingAreaImageListener } from './listeners/addCommitStagingAreaImageListener';
import { addAppConfigReceivedListener } from './listeners/appConfigReceived';
import { addAppStartedListener } from './listeners/appStarted';
import { addBoardIdSelectedListener } from './listeners/boardIdSelected';
import { addRequestedBoardImageDeletionListener } from './listeners/boardImagesDeleted';
@ -51,12 +52,12 @@ import {
} from './listeners/imageUrlsReceived';
import { addInitialImageSelectedListener } from './listeners/initialImageSelected';
import { addModelSelectedListener } from './listeners/modelSelected';
import { addModelsLoadedListener } from './listeners/modelsLoaded';
import { addReceivedOpenAPISchemaListener } from './listeners/receivedOpenAPISchema';
import {
addReceivedPageOfImagesFulfilledListener,
addReceivedPageOfImagesRejectedListener,
} from './listeners/receivedPageOfImages';
import { addSelectionAddedToBatchListener } from './listeners/selectionAddedToBatch';
import {
addSessionCanceledFulfilledListener,
addSessionCanceledPendingListener,
@ -213,9 +214,6 @@ addBoardIdSelectedListener();
// Node schemas
addReceivedOpenAPISchemaListener();
// Batches
addSelectionAddedToBatchListener();
// DND
addImageDroppedListener();
@ -224,3 +222,5 @@ addModelSelectedListener();
// app startup
addAppStartedListener();
addModelsLoadedListener();
addAppConfigReceivedListener();

View File

@ -0,0 +1,17 @@
import { setInfillMethod } from 'features/parameters/store/generationSlice';
import { appInfoApi } from 'services/api/endpoints/appInfo';
import { startAppListening } from '..';
export const addAppConfigReceivedListener = () => {
startAppListening({
matcher: appInfoApi.endpoints.getAppConfig.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
const { infill_methods } = action.payload;
const infillMethod = getState().generation.infillMethod;
if (!infill_methods.includes(infillMethod)) {
dispatch(setInfillMethod(infill_methods[0]));
}
},
});
};

View File

@ -1,5 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import {
ASSETS_CATEGORIES,
IMAGE_CATEGORIES,
INITIAL_IMAGE_LIMIT,
isLoadingChanged,
} from 'features/gallery/store/gallerySlice';
@ -20,7 +22,7 @@ export const addAppStartedListener = () => {
// fill up the gallery tab with images
await dispatch(
receivedPageOfImages({
categories: ['general'],
categories: IMAGE_CATEGORIES,
is_intermediate: false,
offset: 0,
limit: INITIAL_IMAGE_LIMIT,
@ -30,7 +32,7 @@ export const addAppStartedListener = () => {
// fill up the assets tab with images
await dispatch(
receivedPageOfImages({
categories: ['control', 'mask', 'user', 'other'],
categories: ASSETS_CATEGORIES,
is_intermediate: false,
offset: 0,
limit: INITIAL_IMAGE_LIMIT,

View File

@ -1,15 +1,18 @@
import { log } from 'app/logging/useLogger';
import { startAppListening } from '..';
import { selectFilteredImages } from 'features/gallery/store/gallerySelectors';
import {
ASSETS_CATEGORIES,
IMAGE_CATEGORIES,
boardIdSelected,
imageSelected,
selectImagesAll,
boardIdSelected,
} from 'features/gallery/store/gallerySlice';
import { boardsApi } from 'services/api/endpoints/boards';
import {
IMAGES_PER_PAGE,
receivedPageOfImages,
} from 'services/api/thunks/image';
import { boardsApi } from 'services/api/endpoints/boards';
import { startAppListening } from '..';
const moduleLog = log.child({ namespace: 'boards' });
@ -24,19 +27,24 @@ export const addBoardIdSelectedListener = () => {
const state = getState();
const allImages = selectImagesAll(state);
if (!board_id) {
// a board was unselected
dispatch(imageSelected(allImages[0]?.image_name));
if (board_id === 'all') {
// Selected all images
dispatch(imageSelected(allImages[0]?.image_name ?? null));
return;
}
const { categories } = state.gallery;
if (board_id === 'batch') {
// Selected the batch
dispatch(imageSelected(state.gallery.batchImageNames[0] ?? null));
return;
}
const filteredImages = allImages.filter((i) => {
const isInCategory = categories.includes(i.image_category);
const isInSelectedBoard = board_id ? i.board_id === board_id : true;
return isInCategory && isInSelectedBoard;
});
const filteredImages = selectFilteredImages(state);
const categories =
state.gallery.galleryView === 'images'
? IMAGE_CATEGORIES
: ASSETS_CATEGORIES;
// get the board from the cache
const { data: boards } =
@ -45,7 +53,7 @@ export const addBoardIdSelectedListener = () => {
if (!board) {
// can't find the board in cache...
dispatch(imageSelected(allImages[0]?.image_name));
dispatch(boardIdSelected('all'));
return;
}
@ -63,48 +71,3 @@ export const addBoardIdSelectedListener = () => {
},
});
};
export const addBoardIdSelected_changeSelectedImage_listener = () => {
startAppListening({
actionCreator: boardIdSelected,
effect: (action, { getState, dispatch }) => {
const board_id = action.payload;
const state = getState();
// we need to check if we need to fetch more images
if (!board_id) {
// a board was unselected - we don't need to do anything
return;
}
const { categories } = state.gallery;
const filteredImages = selectImagesAll(state).filter((i) => {
const isInCategory = categories.includes(i.image_category);
const isInSelectedBoard = board_id ? i.board_id === board_id : true;
return isInCategory && isInSelectedBoard;
});
// get the board from the cache
const { data: boards } =
boardsApi.endpoints.listAllBoards.select()(state);
const board = boards?.find((b) => b.board_id === board_id);
if (!board) {
// can't find the board in cache...
return;
}
// if we haven't loaded one full page of images from this board, load more
if (
filteredImages.length < board.image_count &&
filteredImages.length < IMAGES_PER_PAGE
) {
dispatch(
receivedPageOfImages({ categories, board_id, is_intermediate: false })
);
}
},
});
};

View File

@ -1,13 +1,13 @@
import { startAppListening } from '..';
import { imageMetadataReceived } from 'services/api/thunks/image';
import { log } from 'app/logging/useLogger';
import { controlNetImageProcessed } from 'features/controlNet/store/actions';
import { Graph } from 'services/api/types';
import { sessionCreated } from 'services/api/thunks/session';
import { sessionReadyToInvoke } from 'features/system/store/actions';
import { socketInvocationComplete } from 'services/events/actions';
import { isImageOutput } from 'services/api/guards';
import { controlNetProcessedImageChanged } from 'features/controlNet/store/controlNetSlice';
import { sessionReadyToInvoke } from 'features/system/store/actions';
import { isImageOutput } from 'services/api/guards';
import { imageDTOReceived } from 'services/api/thunks/image';
import { sessionCreated } from 'services/api/thunks/session';
import { Graph } from 'services/api/types';
import { socketInvocationComplete } from 'services/events/actions';
import { startAppListening } from '..';
const moduleLog = log.child({ namespace: 'controlNet' });
@ -63,10 +63,8 @@ export const addControlNetImageProcessedListener = () => {
// Wait for the ImageDTO to be received
const [imageMetadataReceivedAction] = await take(
(
action
): action is ReturnType<typeof imageMetadataReceived.fulfilled> =>
imageMetadataReceived.fulfilled.match(action) &&
(action): action is ReturnType<typeof imageDTOReceived.fulfilled> =>
imageDTOReceived.fulfilled.match(action) &&
action.payload.image_name === image_name
);
const processedControlImage = imageMetadataReceivedAction.payload;

View File

@ -1,7 +1,6 @@
import { log } from 'app/logging/useLogger';
import { startAppListening } from '..';
import { imageMetadataReceived } from 'services/api/thunks/image';
import { boardImagesApi } from 'services/api/endpoints/boardImages';
import { startAppListening } from '..';
const moduleLog = log.child({ namespace: 'boards' });
@ -15,12 +14,6 @@ export const addImageAddedToBoardFulfilledListener = () => {
{ data: { board_id, image_name } },
'Image added to board'
);
dispatch(
imageMetadataReceived({
image_name,
})
);
},
});
};

View File

@ -1,10 +1,10 @@
import { log } from 'app/logging/useLogger';
import { resetCanvas } from 'features/canvas/store/canvasSlice';
import { controlNetReset } from 'features/controlNet/store/controlNetSlice';
import { selectNextImageToSelect } from 'features/gallery/store/gallerySelectors';
import {
imageRemoved,
imageSelected,
selectFilteredImages,
} from 'features/gallery/store/gallerySlice';
import {
imageDeletionConfirmed,
@ -12,7 +12,6 @@ import {
} from 'features/imageDeletion/store/imageDeletionSlice';
import { nodeEditorReset } from 'features/nodes/store/nodesSlice';
import { clearInitialImage } from 'features/parameters/store/generationSlice';
import { clamp } from 'lodash-es';
import { api } from 'services/api';
import { imageDeleted } from 'services/api/thunks/image';
import { startAppListening } from '..';
@ -37,26 +36,10 @@ export const addRequestedImageDeletionListener = () => {
state.gallery.selection[state.gallery.selection.length - 1];
if (lastSelectedImage === image_name) {
const filteredImages = selectFilteredImages(state);
const ids = filteredImages.map((i) => i.image_name);
const deletedImageIndex = ids.findIndex(
(result) => result.toString() === image_name
);
const filteredIds = ids.filter((id) => id.toString() !== image_name);
const newSelectedImageIndex = clamp(
deletedImageIndex,
0,
filteredIds.length - 1
);
const newSelectedImageId = filteredIds[newSelectedImageIndex];
const newSelectedImageId = selectNextImageToSelect(state, image_name);
if (newSelectedImageId) {
dispatch(imageSelected(newSelectedImageId as string));
dispatch(imageSelected(newSelectedImageId));
} else {
dispatch(imageSelected(null));
}

View File

@ -4,13 +4,12 @@ import {
TypesafeDroppableData,
} from 'app/components/ImageDnd/typesafeDnd';
import { log } from 'app/logging/useLogger';
import {
imageAddedToBatch,
imagesAddedToBatch,
} from 'features/batch/store/batchSlice';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import { controlNetImageChanged } from 'features/controlNet/store/controlNetSlice';
import { imageSelected } from 'features/gallery/store/gallerySlice';
import {
imageSelected,
imagesAddedToBatch,
} from 'features/gallery/store/gallerySlice';
import {
fieldValueChanged,
imageCollectionFieldValueChanged,
@ -21,57 +20,66 @@ import { startAppListening } from '../';
const moduleLog = log.child({ namespace: 'dnd' });
export const imageDropped = createAction<{
export const dndDropped = createAction<{
overData: TypesafeDroppableData;
activeData: TypesafeDraggableData;
}>('dnd/imageDropped');
}>('dnd/dndDropped');
export const addImageDroppedListener = () => {
startAppListening({
actionCreator: imageDropped,
effect: (action, { dispatch, getState }) => {
actionCreator: dndDropped,
effect: async (action, { dispatch, getState, take }) => {
const { activeData, overData } = action.payload;
const { actionType } = overData;
const state = getState();
moduleLog.debug(
{ data: { activeData, overData } },
'Image or selection dropped'
);
// set current image
if (
actionType === 'SET_CURRENT_IMAGE' &&
overData.actionType === 'SET_CURRENT_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
dispatch(imageSelected(activeData.payload.imageDTO.image_name));
return;
}
// set initial image
if (
actionType === 'SET_INITIAL_IMAGE' &&
overData.actionType === 'SET_INITIAL_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
dispatch(initialImageChanged(activeData.payload.imageDTO));
return;
}
// add image to batch
if (
actionType === 'ADD_TO_BATCH' &&
overData.actionType === 'ADD_TO_BATCH' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
dispatch(imageAddedToBatch(activeData.payload.imageDTO.image_name));
dispatch(imagesAddedToBatch([activeData.payload.imageDTO.image_name]));
return;
}
// add multiple images to batch
if (
actionType === 'ADD_TO_BATCH' &&
activeData.payloadType === 'GALLERY_SELECTION'
overData.actionType === 'ADD_TO_BATCH' &&
activeData.payloadType === 'IMAGE_NAMES'
) {
dispatch(imagesAddedToBatch(state.gallery.selection));
dispatch(imagesAddedToBatch(activeData.payload.image_names));
return;
}
// set control image
if (
actionType === 'SET_CONTROLNET_IMAGE' &&
overData.actionType === 'SET_CONTROLNET_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
@ -82,20 +90,22 @@ export const addImageDroppedListener = () => {
controlNetId,
})
);
return;
}
// set canvas image
if (
actionType === 'SET_CANVAS_INITIAL_IMAGE' &&
overData.actionType === 'SET_CANVAS_INITIAL_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
dispatch(setInitialCanvasImage(activeData.payload.imageDTO));
return;
}
// set nodes image
if (
actionType === 'SET_NODES_IMAGE' &&
overData.actionType === 'SET_NODES_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
@ -107,11 +117,12 @@ export const addImageDroppedListener = () => {
value: activeData.payload.imageDTO,
})
);
return;
}
// set multiple nodes images (single image handler)
if (
actionType === 'SET_MULTI_NODES_IMAGE' &&
overData.actionType === 'SET_MULTI_NODES_IMAGE' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO
) {
@ -123,43 +134,30 @@ export const addImageDroppedListener = () => {
value: [activeData.payload.imageDTO],
})
);
return;
}
// set multiple nodes images (multiple images handler)
if (
actionType === 'SET_MULTI_NODES_IMAGE' &&
activeData.payloadType === 'GALLERY_SELECTION'
overData.actionType === 'SET_MULTI_NODES_IMAGE' &&
activeData.payloadType === 'IMAGE_NAMES'
) {
const { fieldName, nodeId } = overData.context;
dispatch(
imageCollectionFieldValueChanged({
nodeId,
fieldName,
value: state.gallery.selection.map((image_name) => ({
value: activeData.payload.image_names.map((image_name) => ({
image_name,
})),
})
);
return;
}
// remove image from board
// TODO: remove board_id from `removeImageFromBoard()` endpoint
// TODO: handle multiple images
// if (
// actionType === 'MOVE_BOARD' &&
// activeData.payloadType === 'IMAGE_DTO' &&
// activeData.payload.imageDTO &&
// overData.boardId !== null
// ) {
// const { image_name } = activeData.payload.imageDTO;
// dispatch(
// boardImagesApi.endpoints.removeImageFromBoard.initiate({ image_name })
// );
// }
// add image to board
if (
actionType === 'MOVE_BOARD' &&
overData.actionType === 'MOVE_BOARD' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO &&
overData.context.boardId
@ -172,17 +170,89 @@ export const addImageDroppedListener = () => {
board_id: boardId,
})
);
return;
}
// add multiple images to board
// TODO: add endpoint
// if (
// actionType === 'ADD_TO_BATCH' &&
// activeData.payloadType === 'IMAGE_NAMES' &&
// activeData.payload.imageDTONames
// ) {
// dispatch(boardImagesApi.endpoints.addImagesToBoard.initiate({}));
// }
// remove image from board
if (
overData.actionType === 'MOVE_BOARD' &&
activeData.payloadType === 'IMAGE_DTO' &&
activeData.payload.imageDTO &&
overData.context.boardId === null
) {
const { image_name, board_id } = activeData.payload.imageDTO;
if (board_id) {
dispatch(
boardImagesApi.endpoints.removeImageFromBoard.initiate({
image_name,
board_id,
})
);
}
return;
}
// add gallery selection to board
if (
overData.actionType === 'MOVE_BOARD' &&
activeData.payloadType === 'IMAGE_NAMES' &&
overData.context.boardId
) {
console.log('adding gallery selection to board');
const board_id = overData.context.boardId;
dispatch(
boardImagesApi.endpoints.addManyBoardImages.initiate({
board_id,
image_names: activeData.payload.image_names,
})
);
return;
}
// remove gallery selection from board
if (
overData.actionType === 'MOVE_BOARD' &&
activeData.payloadType === 'IMAGE_NAMES' &&
overData.context.boardId === null
) {
console.log('removing gallery selection from board');
dispatch(
boardImagesApi.endpoints.deleteManyBoardImages.initiate({
image_names: activeData.payload.image_names,
})
);
return;
}
// add batch selection to board
if (
overData.actionType === 'MOVE_BOARD' &&
activeData.payloadType === 'IMAGE_NAMES' &&
overData.context.boardId
) {
const board_id = overData.context.boardId;
dispatch(
boardImagesApi.endpoints.addManyBoardImages.initiate({
board_id,
image_names: activeData.payload.image_names,
})
);
return;
}
// remove batch selection from board
if (
overData.actionType === 'MOVE_BOARD' &&
activeData.payloadType === 'IMAGE_NAMES' &&
overData.context.boardId === null
) {
dispatch(
boardImagesApi.endpoints.deleteManyBoardImages.initiate({
image_names: activeData.payload.image_names,
})
);
return;
}
},
});
};
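
For orientation, dispatching the renamed `dndDropped` action looks roughly like the sketch below. The literal fields are inferred from the cases handled above (`payloadType`, `payload.image_names`, `actionType`, `context.boardId`); the real `TypesafeDraggableData` / `TypesafeDroppableData` types in app/components/ImageDnd/typesafeDnd may require additional bookkeeping fields, and imports are omitted because this listener file's path is not shown here, so treat this as an illustrative shape rather than a type-checked call.

// Hypothetical drop: move the current gallery selection onto a board.
// `store` is the configured app store; 'my-board-id' is a made-up board id.
store.dispatch(
  dndDropped({
    activeData: {
      payloadType: 'IMAGE_NAMES',
      payload: { image_names: ['0001.png', '0002.png'] },
    },
    overData: {
      actionType: 'MOVE_BOARD',
      context: { boardId: 'my-board-id' },
    },
  })
);

Given the MOVE_BOARD + IMAGE_NAMES + boardId case above, this dispatch would end up calling `boardImagesApi.endpoints.addManyBoardImages.initiate(...)`.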

View File

@ -1,13 +1,13 @@
import { log } from 'app/logging/useLogger';
import { startAppListening } from '..';
import { imageMetadataReceived, imageUpdated } from 'services/api/thunks/image';
import { imageUpserted } from 'features/gallery/store/gallerySlice';
import { imageDTOReceived, imageUpdated } from 'services/api/thunks/image';
import { startAppListening } from '..';
const moduleLog = log.child({ namespace: 'image' });
export const addImageMetadataReceivedFulfilledListener = () => {
startAppListening({
actionCreator: imageMetadataReceived.fulfilled,
actionCreator: imageDTOReceived.fulfilled,
effect: (action, { getState, dispatch }) => {
const image = action.payload;
@ -40,7 +40,7 @@ export const addImageMetadataReceivedFulfilledListener = () => {
export const addImageMetadataReceivedRejectedListener = () => {
startAppListening({
actionCreator: imageMetadataReceived.rejected,
actionCreator: imageDTOReceived.rejected,
effect: (action, { getState, dispatch }) => {
moduleLog.debug(
{ data: { image: action.meta.arg } },

View File

@ -1,7 +1,6 @@
import { log } from 'app/logging/useLogger';
import { startAppListening } from '..';
import { imageMetadataReceived } from 'services/api/thunks/image';
import { boardImagesApi } from 'services/api/endpoints/boardImages';
import { startAppListening } from '..';
const moduleLog = log.child({ namespace: 'boards' });
@ -15,12 +14,6 @@ export const addImageRemovedFromBoardFulfilledListener = () => {
{ data: { board_id, image_name } },
'Image removed from board'
);
dispatch(
imageMetadataReceived({
image_name,
})
);
},
});
};

View File

@ -1,13 +1,15 @@
import { startAppListening } from '..';
import { imageUploaded } from 'services/api/thunks/image';
import { addToast } from 'features/system/store/systemSlice';
import { log } from 'app/logging/useLogger';
import { imageUpserted } from 'features/gallery/store/gallerySlice';
import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
import { controlNetImageChanged } from 'features/controlNet/store/controlNetSlice';
import { initialImageChanged } from 'features/parameters/store/generationSlice';
import {
imageUpserted,
imagesAddedToBatch,
} from 'features/gallery/store/gallerySlice';
import { fieldValueChanged } from 'features/nodes/store/nodesSlice';
import { imageAddedToBatch } from 'features/batch/store/batchSlice';
import { initialImageChanged } from 'features/parameters/store/generationSlice';
import { addToast } from 'features/system/store/systemSlice';
import { imageUploaded } from 'services/api/thunks/image';
import { startAppListening } from '..';
const moduleLog = log.child({ namespace: 'image' });
@ -73,7 +75,7 @@ export const addImageUploadedFulfilledListener = () => {
}
if (postUploadAction?.type === 'ADD_TO_BATCH') {
dispatch(imageAddedToBatch(image.image_name));
dispatch(imagesAddedToBatch([image.image_name]));
return;
}
},

View File

@ -1,40 +1,70 @@
import { makeToast } from 'app/components/Toaster';
import { log } from 'app/logging/useLogger';
import { loraRemoved } from 'features/lora/store/loraSlice';
import { modelSelected } from 'features/parameters/store/actions';
import {
modelChanged,
vaeSelected,
} from 'features/parameters/store/generationSlice';
import { zMainModel } from 'features/parameters/store/parameterZodSchemas';
import { zMainModel } from 'features/parameters/types/parameterSchemas';
import { addToast } from 'features/system/store/systemSlice';
import { forEach } from 'lodash-es';
import { startAppListening } from '..';
import { lorasCleared } from '../../../../../features/lora/store/loraSlice';
const moduleLog = log.child({ module: 'models' });
export const addModelSelectedListener = () => {
startAppListening({
actionCreator: modelSelected,
effect: (action, { getState, dispatch }) => {
const state = getState();
const [base_model, type, name] = action.payload.split('/');
const result = zMainModel.safeParse(action.payload);
if (state.generation.model?.base_model !== base_model) {
dispatch(
addToast(
makeToast({
title: 'Base model changed, clearing submodels',
status: 'warning',
})
)
if (!result.success) {
moduleLog.error(
{ error: result.error.format() },
'Failed to parse main model'
);
dispatch(vaeSelected(null));
dispatch(lorasCleared());
// TODO: controlnet cleared
return;
}
const newModel = zMainModel.parse({
id: action.payload,
base_model,
name,
});
const newModel = result.data;
const { base_model } = newModel;
if (state.generation.model?.base_model !== base_model) {
// we may need to reset some incompatible submodels
let modelsCleared = 0;
// handle incompatible loras
forEach(state.lora.loras, (lora, id) => {
if (lora.base_model !== base_model) {
dispatch(loraRemoved(id));
modelsCleared += 1;
}
});
// handle incompatible vae
const { vae } = state.generation;
if (vae && vae.base_model !== base_model) {
dispatch(vaeSelected(null));
modelsCleared += 1;
}
// TODO: handle incompatible controlnet; pending model manager support
if (modelsCleared > 0) {
dispatch(
addToast(
makeToast({
title: `Base model changed, cleared ${modelsCleared} incompatible submodel${
modelsCleared === 1 ? '' : 's'
}`,
status: 'warning',
})
)
);
}
}
dispatch(modelChanged(newModel));
},
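
Both this listener and the modelsLoaded listener below rely on `safeParse`, logging a formatted error instead of throwing when a model fails validation. A minimal, standalone illustration of that pattern follows; the schema is a hypothetical stand-in, since the exact fields of the real `zMainModel` in features/parameters/types/parameterSchemas are not shown in this diff.

import { z } from 'zod';

// Hypothetical stand-in for zMainModel; the field names mirror how models
// are matched elsewhere in this diff (model_name + base_model).
const zExampleMainModel = z.object({
  model_name: z.string().min(1),
  base_model: z.enum(['sd-1', 'sd-2', 'sdxl']),
});

const result = zExampleMainModel.safeParse({
  model_name: 'stable-diffusion-v1-5',
  base_model: 'sd-1',
});

if (!result.success) {
  // .format() nests the issues per field, which is what gets logged above
  console.error(result.error.format());
} else {
  // result.data is fully typed; no cast needed
  console.log(result.data.base_model); // 'sd-1'
}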

View File

@ -0,0 +1,133 @@
import { log } from 'app/logging/useLogger';
import { loraRemoved } from 'features/lora/store/loraSlice';
import {
modelChanged,
vaeSelected,
} from 'features/parameters/store/generationSlice';
import {
zMainModel,
zVaeModel,
} from 'features/parameters/types/parameterSchemas';
import { forEach, some } from 'lodash-es';
import { modelsApi } from 'services/api/endpoints/models';
import { startAppListening } from '..';
const moduleLog = log.child({ module: 'models' });
export const addModelsLoadedListener = () => {
startAppListening({
matcher: modelsApi.endpoints.getMainModels.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
// models loaded, we need to ensure the selected model is available and if not, select the first one
const currentModel = getState().generation.model;
const isCurrentModelAvailable = some(
action.payload.entities,
(m) =>
m?.model_name === currentModel?.model_name &&
m?.base_model === currentModel?.base_model
);
if (isCurrentModelAvailable) {
return;
}
const firstModelId = action.payload.ids[0];
const firstModel = action.payload.entities[firstModelId];
if (!firstModel) {
// No models loaded at all
dispatch(modelChanged(null));
return;
}
const result = zMainModel.safeParse(firstModel);
if (!result.success) {
moduleLog.error(
{ error: result.error.format() },
'Failed to parse main model'
);
return;
}
dispatch(modelChanged(result.data));
},
});
startAppListening({
matcher: modelsApi.endpoints.getVaeModels.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
// VAEs loaded, need to reset the VAE if it's no longer available
const currentVae = getState().generation.vae;
if (currentVae === null) {
// null is a valid VAE! it means "use the default with the main model"
return;
}
const isCurrentVAEAvailable = some(
action.payload.entities,
(m) =>
m?.model_name === currentVae?.model_name &&
m?.base_model === currentVae?.base_model
);
if (isCurrentVAEAvailable) {
return;
}
const firstModelId = action.payload.ids[0];
const firstModel = action.payload.entities[firstModelId];
if (!firstModel) {
// No custom VAEs loaded at all; use the default
dispatch(vaeSelected(null));
return;
}
const result = zVaeModel.safeParse(firstModel);
if (!result.success) {
moduleLog.error(
{ error: result.error.format() },
'Failed to parse VAE model'
);
return;
}
dispatch(vaeSelected(result.data));
},
});
startAppListening({
matcher: modelsApi.endpoints.getLoRAModels.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
// LoRA models loaded - need to remove missing LoRAs from state
const loras = getState().lora.loras;
forEach(loras, (lora, id) => {
const isLoRAAvailable = some(
action.payload.entities,
(m) =>
m?.model_name === lora?.model_name &&
m?.base_model === lora?.base_model
);
if (isLoRAAvailable) {
return;
}
dispatch(loraRemoved(id));
});
},
});
startAppListening({
matcher: modelsApi.endpoints.getControlNetModels.matchFulfilled,
effect: async (action, { getState, dispatch }) => {
// ControlNet models loaded - need to remove missing ControlNets from state
// TODO: pending model manager controlnet support
},
});
};

View File

@ -1,19 +0,0 @@
import { startAppListening } from '..';
import { log } from 'app/logging/useLogger';
import {
imagesAddedToBatch,
selectionAddedToBatch,
} from 'features/batch/store/batchSlice';
const moduleLog = log.child({ namespace: 'batch' });
export const addSelectionAddedToBatchListener = () => {
startAppListening({
actionCreator: selectionAddedToBatch,
effect: (action, { dispatch, getState }) => {
const { selection } = getState().gallery;
dispatch(imagesAddedToBatch(selection));
},
});
};

View File

@ -30,6 +30,7 @@ export const addSessionCreatedRejectedListener = () => {
effect: (action, { getState, dispatch }) => {
if (action.payload) {
const { arg, error } = action.payload;
const stringifiedError = JSON.stringify(error);
moduleLog.error(
{
data: {
@ -37,7 +38,7 @@ export const addSessionCreatedRejectedListener = () => {
error: serializeError(error),
},
},
`Problem creating session`
`Problem creating session: ${stringifiedError}`
);
}
},

View File

@ -33,6 +33,7 @@ export const addSessionInvokedRejectedListener = () => {
effect: (action, { getState, dispatch }) => {
if (action.payload) {
const { arg, error } = action.payload;
const stringifiedError = JSON.stringify(error);
moduleLog.error(
{
data: {
@ -40,7 +41,7 @@ export const addSessionInvokedRejectedListener = () => {
error: serializeError(error),
},
},
`Problem invoking session`
`Problem invoking session: ${stringifiedError}`
);
}
},

View File

@ -1,15 +1,15 @@
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import { startAppListening } from '../..';
import { log } from 'app/logging/useLogger';
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import { progressImageSet } from 'features/system/store/systemSlice';
import { boardImagesApi } from 'services/api/endpoints/boardImages';
import { isImageOutput } from 'services/api/guards';
import { imageDTOReceived } from 'services/api/thunks/image';
import { sessionCanceled } from 'services/api/thunks/session';
import {
appSocketInvocationComplete,
socketInvocationComplete,
} from 'services/events/actions';
import { imageMetadataReceived } from 'services/api/thunks/image';
import { sessionCanceled } from 'services/api/thunks/session';
import { isImageOutput } from 'services/api/guards';
import { progressImageSet } from 'features/system/store/systemSlice';
import { boardImagesApi } from 'services/api/endpoints/boardImages';
import { startAppListening } from '../..';
const moduleLog = log.child({ namespace: 'socketio' });
const nodeDenylist = ['dataURL_image'];
@ -42,13 +42,13 @@ export const addInvocationCompleteEventListener = () => {
// Get its metadata
dispatch(
imageMetadataReceived({
imageDTOReceived({
image_name,
})
);
const [{ payload: imageDTO }] = await take(
imageMetadataReceived.fulfilled.match
imageDTOReceived.fulfilled.match
);
// Handle canvas image

View File

@ -13,7 +13,7 @@ export const addInvocationErrorEventListener = () => {
effect: (action, { dispatch, getState }) => {
moduleLog.error(
action.payload,
`Invocation error (${action.payload.data.node.type})`
`Invocation error (${action.payload.data.node.type}): ${action.payload.data.error}`
);
dispatch(appSocketInvocationError(action.payload));
},

View File

@ -1,6 +1,7 @@
import {
AnyAction,
ThunkDispatch,
autoBatchEnhancer,
combineReducers,
configureStore,
} from '@reduxjs/toolkit';
@ -8,14 +9,12 @@ import {
import dynamicMiddlewares from 'redux-dynamic-middlewares';
import { rememberEnhancer, rememberReducer } from 'redux-remember';
import batchReducer from 'features/batch/store/batchSlice';
import canvasReducer from 'features/canvas/store/canvasSlice';
import controlNetReducer from 'features/controlNet/store/controlNetSlice';
import dynamicPromptsReducer from 'features/dynamicPrompts/store/slice';
import boardsReducer from 'features/gallery/store/boardSlice';
import galleryReducer from 'features/gallery/store/gallerySlice';
import imageDeletionReducer from 'features/imageDeletion/store/imageDeletionSlice';
import lightboxReducer from 'features/lightbox/store/lightboxSlice';
import loraReducer from 'features/lora/store/loraSlice';
import nodesReducer from 'features/nodes/store/nodesSlice';
import generationReducer from 'features/parameters/store/generationSlice';
@ -39,7 +38,6 @@ const allReducers = {
canvas: canvasReducer,
gallery: galleryReducer,
generation: generationReducer,
lightbox: lightboxReducer,
nodes: nodesReducer,
postprocessing: postprocessingReducer,
system: systemReducer,
@ -49,7 +47,6 @@ const allReducers = {
controlNet: controlNetReducer,
boards: boardsReducer,
dynamicPrompts: dynamicPromptsReducer,
batch: batchReducer,
imageDeletion: imageDeletionReducer,
lora: loraReducer,
[api.reducerPath]: api.reducer,
@ -63,30 +60,29 @@ const rememberedKeys: (keyof typeof allReducers)[] = [
'canvas',
'gallery',
'generation',
'lightbox',
'nodes',
'postprocessing',
'system',
'ui',
'controlNet',
'dynamicPrompts',
'batch',
'lora',
// 'boards',
// 'hotkeys',
// 'config',
];
export const store = configureStore({
reducer: rememberedRootReducer,
enhancers: [
rememberEnhancer(window.localStorage, rememberedKeys, {
persistDebounce: 300,
serialize,
unserialize,
prefix: LOCALSTORAGE_PREFIX,
}),
],
enhancers: (existingEnhancers) => {
return existingEnhancers
.concat(
rememberEnhancer(window.localStorage, rememberedKeys, {
persistDebounce: 300,
serialize,
unserialize,
prefix: LOCALSTORAGE_PREFIX,
})
)
.concat(autoBatchEnhancer());
},
middleware: (getDefaultMiddleware) =>
getDefaultMiddleware({
immutableCheck: false,
@ -96,10 +92,26 @@ export const store = configureStore({
.concat(dynamicMiddlewares)
.prepend(listenerMiddleware.middleware),
devTools: {
actionsDenylist,
actionSanitizer,
stateSanitizer,
trace: true,
predicate: (state, action) => {
// TODO: hook up to the log level param in system slice
// manually type state, cannot type the arg
// const typedState = state as ReturnType<typeof rootReducer>;
if (action.type.startsWith('api/')) {
// don't log api actions, with manual cache updates they are extremely noisy
return false;
}
if (actionsDenylist.includes(action.type)) {
// don't log other noisy actions
return false;
}
return true;
},
},
});
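
The `autoBatchEnhancer()` added above only affects actions that opt in: an action tags itself via `prepareAutoBatched`, and the enhancer then defers subscriber notifications for runs of such low-priority actions instead of notifying on every dispatch. The slice below is invented purely for illustration and is not one of the app's slices.

import {
  createSlice,
  prepareAutoBatched,
  PayloadAction,
} from '@reduxjs/toolkit';

const exampleProgressSlice = createSlice({
  name: 'exampleProgress',
  initialState: { step: 0 },
  reducers: {
    stepChanged: {
      reducer(state, action: PayloadAction<number>) {
        state.step = action.payload;
      },
      // attaches meta[SHOULD_AUTOBATCH] so autoBatchEnhancer can batch
      // notifications for this high-frequency action
      prepare: prepareAutoBatched<number>(),
    },
  },
});

export const { stepChanged } = exampleProgressSlice.actions;
export default exampleProgressSlice.reducer;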

View File

@ -94,7 +94,8 @@ export type AppFeature =
| 'bugLink'
| 'localization'
| 'consoleLogging'
| 'dynamicPrompting';
| 'dynamicPrompting'
| 'batches';
/**
* A disable-able Stable Diffusion feature
@ -102,6 +103,8 @@ export type AppFeature =
export type SDFeature =
| 'controlNet'
| 'noise'
| 'perlinNoise'
| 'noiseThreshold'
| 'variation'
| 'symmetry'
| 'seamless'

Some files were not shown because too many files have changed in this diff.