Compare commits
2 Commits
v3.6.2 ... fix-migrat

Author | SHA1 | Date
---|---|---
| 7d65a18667 |
| 7bada22ded |

8  .github/CODEOWNERS  vendored
@@ -1,5 +1,5 @@
# continuous integration
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr
/.github/workflows/ @lstein @blessedcoolant @hipsterusername

# documentation
/docs/ @lstein @blessedcoolant @hipsterusername @Millu

@@ -10,7 +10,7 @@

# installation and configuration
/pyproject.toml @lstein @blessedcoolant @hipsterusername
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
/docker/ @lstein @blessedcoolant @hipsterusername
/scripts/ @ebr @lstein @hipsterusername
/installer/ @lstein @ebr @hipsterusername
/invokeai/assets @lstein @ebr @hipsterusername

@@ -26,7 +26,9 @@

# front ends
/invokeai/frontend/CLI @lstein @hipsterusername
/invokeai/frontend/install @lstein @ebr @hipsterusername
/invokeai/frontend/install @lstein @ebr @hipsterusername
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
15  .github/pull_request_template.md  vendored
@@ -42,21 +42,6 @@ Please provide steps on how to test changes, any hardware or
software specifications as well as any other pertinent information.
-->

## Merge Plan

<!--
A merge plan describes how this PR should be handled after it is approved.

Example merge plans:
- "This PR can be merged when approved"
- "This must be squash-merged when approved"
- "DO NOT MERGE - I will rebase and tidy commits before merging"
- "#dev-chat on discord needs to be advised of this change when it is merged"

A merge plan is particularly important for large PRs or PRs that touch the
database in any way.
-->

## Added/updated tests?

- [ ] Yes
5  .github/workflows/build-container.yml  vendored
@@ -40,14 +40,10 @@ jobs:
      - name: Free up more disk space on the runner
        # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
        run: |
          echo "----- Free space before cleanup"
          df -h
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
          sudo swapoff /mnt/swapfile
          sudo rm -rf /mnt/swapfile
          echo "----- Free space after cleanup"
          df -h

      - name: Checkout
        uses: actions/checkout@v3

@@ -95,7 +91,6 @@ jobs:
        # password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build container
        timeout-minutes: 40
        id: docker_build
        uses: docker/build-push-action@v4
        with:
6  .github/workflows/lint-frontend.yml  vendored
@@ -21,16 +21,16 @@ jobs:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-22.04
    steps:
      - name: Setup Node 18
      - name: Setup Node 20
        uses: actions/setup-node@v4
        with:
          node-version: '18'
          node-version: '20'
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup pnpm
        uses: pnpm/action-setup@v2
        with:
          version: '8.12.1'
          version: 8
      - name: Install dependencies
        run: 'pnpm install --prefer-frozen-lockfile'
      - name: Typescript
50  .github/workflows/pypi-release.yml  vendored
@@ -1,15 +1,13 @@
name: PyPI Release

on:
  push:
    paths:
      - 'invokeai/version/invokeai_version.py'
  workflow_dispatch:
    inputs:
      publish_package:
        description: 'Publish build on PyPi? [true/false]'
        required: true
        default: 'false'

jobs:
  build-and-release:
  release:
    if: github.repository == 'invoke-ai/InvokeAI'
    runs-on: ubuntu-22.04
    env:

@@ -17,43 +15,19 @@ jobs:
      TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
      TWINE_NON_INTERACTIVE: 1
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: checkout sources
        uses: actions/checkout@v3

      - name: Setup Node 18
        uses: actions/setup-node@v4
        with:
          node-version: '18'

      - name: Setup pnpm
        uses: pnpm/action-setup@v2
        with:
          version: '8.12.1'

      - name: Install frontend dependencies
        run: pnpm install --prefer-frozen-lockfile
        working-directory: invokeai/frontend/web

      - name: Build frontend
        run: pnpm run build
        working-directory: invokeai/frontend/web

      - name: Install python dependencies
      - name: install deps
        run: pip install --upgrade build twine

      - name: Build python package
      - name: build package
        run: python3 -m build

      - name: Upload build as workflow artifact
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist

      - name: Check distribution
      - name: check distribution
        run: twine check dist/*

      - name: Check PyPI versions
      - name: check PyPI versions
        if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')
        run: |
          pip install --upgrade requests

@@ -62,6 +36,6 @@ jobs:
          EXISTS=scripts.pypi_helper.local_on_pypi(); \
          print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV

      - name: Publish build on PyPi
        if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != '' && github.event.inputs.publish_package == 'true'
      - name: upload package
        if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
        run: twine upload dist/*
2  .github/workflows/test-invoke-pip.yml  vendored
@@ -58,7 +58,7 @@ jobs:

      - name: Check for changed python files
        id: changed-files
        uses: tj-actions/changed-files@v41
        uses: tj-actions/changed-files@v37
        with:
          files_yaml: |
            python:
14  README.md
@@ -1,10 +1,10 @@
<div align="center">



# Invoke - Professional Creative AI Tools for Visual Media
## To learn more about Invoke, or implement our Business solutions, visit [invoke.com](https://www.invoke.com/about)

# Invoke AI - Generative AI for Professional Creatives
## Professional Creative Tools for Stable Diffusion, Custom-Trained Models, and more.
To learn more about Invoke AI, get started instantly, or implement our Business solutions, visit [invoke.ai](https://invoke.ai)

[![discord badge]][discord link]

@@ -56,9 +56,7 @@ the foundation for multiple commercial products.

<div align="center">



</div>

@@ -272,7 +270,7 @@ upgrade script.** See the next section for a Windows recipe.
3. Select option [1] to upgrade to the latest release.

4. Once the upgrade is finished you will be returned to the launcher
   menu. Select option [6] "Re-run the configure script to fix a broken
   menu. Select option [7] "Re-run the configure script to fix a broken
   install or to complete a major upgrade".

   This will run the configure script against the v2.3 directory and
@@ -2,17 +2,14 @@
## Any environment variables supported by InvokeAI can be specified here,
## in addition to the examples below.

# HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where InvokeAI will store data.
# INVOKEAI_ROOT is the path to a path on the local filesystem where InvokeAI will store data.
# Outputs will also be stored here by default.
# If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
#HOST_INVOKEAI_ROOT=../../invokeai-data

# INVOKEAI_ROOT is the path to the root of the InvokeAI repository within the container.
# INVOKEAI_ROOT=~/invokeai
# This **must** be an absolute path.
INVOKEAI_ROOT=

# Get this value from your HuggingFace account settings page.
# HUGGING_FACE_HUB_TOKEN=

## optional variables specific to the docker setup.
# GPU_DRIVER=nvidia #| rocm
# GPU_DRIVER=cuda # or rocm
# CONTAINER_UID=1000
@@ -59,16 +59,14 @@ RUN --mount=type=cache,target=/root/.cache/pip \

# #### Build the Web UI ------------------------------------

FROM node:20-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable

FROM node:18 AS web-builder
WORKDIR /build
COPY invokeai/frontend/web/ ./
RUN --mount=type=cache,target=/pnpm/store \
    pnpm install --frozen-lockfile
RUN npx vite build
RUN --mount=type=cache,target=/usr/lib/node_modules \
    npm install --include dev
RUN --mount=type=cache,target=/usr/lib/node_modules \
    yarn vite build


#### Runtime stage ---------------------------------------
@@ -1,14 +1,6 @@
# InvokeAI Containerized

All commands should be run within the `docker` directory: `cd docker`

## Quickstart :rocket:

On a known working Linux+Docker+CUDA (Nvidia) system, execute `./run.sh` in this directory. It will take a few minutes - depending on your internet speed - to install the core models. Once the application starts up, open `http://localhost:9090` in your browser to Invoke!

For more configuration options (using an AMD GPU, custom root directory location, etc): read on.

## Detailed setup
All commands are to be run from the `docker` directory: `cd docker`

#### Linux

@@ -26,12 +18,12 @@ For more configuration options (using an AMD GPU, custom root directory location

This is done via Docker Desktop preferences

### Configure Invoke environment
## Quickstart

1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
   a. the desired location of the InvokeAI runtime directory, or
   b. an existing, v3.0.0 compatible runtime directory.
1. Execute `run.sh`
1. `docker compose up`

The image will be built automatically if needed.

@@ -45,21 +37,19 @@ The runtime directory (holding models and outputs) will be created in the locati

The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.

To use an AMD GPU, set `GPU_DRIVER=rocm` in your `.env` file.

## Customize

Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `run.sh`, your custom values will be used.
Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `docker compose up`, your custom values will be used.

You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.

Values are optional, but setting `INVOKEAI_ROOT` is highly recommended. The default is `~/invokeai`. Example:
Example (values are optional, but setting `INVOKEAI_ROOT` is highly recommended):

```bash
INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
HUGGINGFACE_TOKEN=the_actual_token
CONTAINER_UID=1000
GPU_DRIVER=nvidia
GPU_DRIVER=cuda
```

Any environment variables supported by InvokeAI can be set here - please see the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.
11  docker/build.sh  Executable file
@@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -e

build_args=""

[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env)

echo "docker compose build args:"
echo $build_args

docker compose build $build_args
@@ -2,8 +2,23 @@

version: '3.8'

x-invokeai: &invokeai
services:
  invokeai:
    image: "local/invokeai:latest"
    # edit below to run on a container runtime other than nvidia-container-runtime.
    # not yet tested with rocm/AMD GPUs
    # Comment out the "deploy" section to run on CPU only
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    # For AMD support, comment out the deploy section above and uncomment the devices section below:
    #devices:
    # - /dev/kfd:/dev/kfd
    # - /dev/dri:/dev/dri
    build:
      context: ..
      dockerfile: docker/Dockerfile

@@ -21,9 +36,7 @@ x-invokeai: &invokeai
    ports:
      - "${INVOKEAI_PORT:-9090}:9090"
    volumes:
      - type: bind
        source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
        target: ${INVOKEAI_ROOT:-/invokeai}
      - ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
      - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
      # - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
      # - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}

@@ -37,27 +50,3 @@ x-invokeai: &invokeai
    #   - |
    #     invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
    #     invokeai-nodes-web --host 0.0.0.0

services:
  invokeai-nvidia:
    <<: *invokeai
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  invokeai-cpu:
    <<: *invokeai
    profiles:
      - cpu

  invokeai-rocm:
    <<: *invokeai
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri:/dev/dri
    profiles:
      - rocm
@@ -1,32 +1,11 @@
#!/usr/bin/env bash
set -e -o pipefail
set -e

run() {
local scriptdir=$(dirname "${BASH_SOURCE[0]}")
cd "$scriptdir" || exit 1
# This script is provided for backwards compatibility with the old docker setup.
# it doesn't do much aside from wrapping the usual docker compose CLI.

local build_args=""
local profile=""
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
cd "$SCRIPTDIR" || exit 1

touch .env
build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"

[[ -z "$profile" ]] && profile="nvidia"

local service_name="invokeai-$profile"

if [[ ! -z "$build_args" ]]; then
printf "%s\n" "docker compose build args:"
printf "%s\n" "$build_args"
fi

docker compose build $build_args
unset build_args

printf "%s\n" "starting service $service_name"
docker compose --profile "$profile" up -d "$service_name"
docker compose logs -f
}

run
docker compose up -d
docker compose logs -f
Before Width: | Height: | Size: 46 KiB After Width: | Height: | Size: 297 KiB |
Before Width: | Height: | Size: 4.9 MiB After Width: | Height: | Size: 1.1 MiB |
Before Width: | Height: | Size: 1.1 MiB After Width: | Height: | Size: 169 KiB |
Before Width: | Height: | Size: 131 KiB After Width: | Height: | Size: 194 KiB |
Before Width: | Height: | Size: 122 KiB After Width: | Height: | Size: 209 KiB |
Before Width: | Height: | Size: 95 KiB After Width: | Height: | Size: 114 KiB |
Before Width: | Height: | Size: 123 KiB After Width: | Height: | Size: 187 KiB |
Before Width: | Height: | Size: 107 KiB After Width: | Height: | Size: 112 KiB |
Before Width: | Height: | Size: 61 KiB After Width: | Height: | Size: 132 KiB |
Before Width: | Height: | Size: 119 KiB After Width: | Height: | Size: 167 KiB |
BIN
docs/assets/nodes/groupsrandseed.png
Normal file
After Width: | Height: | Size: 70 KiB |
Before Width: | Height: | Size: 60 KiB After Width: | Height: | Size: 59 KiB |
Before Width: | Height: | Size: 129 KiB |
@@ -1,277 +0,0 @@
# The InvokeAI Download Queue

The DownloadQueueService provides a multithreaded parallel download
queue for arbitrary URLs, with queue prioritization, event handling,
and restart capabilities.

## Simple Example

```
from invokeai.app.services.download import DownloadQueueService, TqdmProgress

download_queue = DownloadQueueService()
for url in ['https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/a-painting-of-a-fire.png?raw=true',
            'https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/birdhouse.png?raw=true',
            'https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/missing.png',
            'https://civitai.com/api/download/models/152309?type=Model&format=SafeTensor',
            ]:

    # urls start downloading as soon as download() is called
    download_queue.download(source=url,
                            dest='/tmp/downloads',
                            on_progress=TqdmProgress().update
                            )

download_queue.join()  # wait for all downloads to finish
for job in download_queue.list_jobs():
    print(job.model_dump_json(exclude_none=True, indent=4), "\n")
```

Output:

```
{
  "source": "https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/a-painting-of-a-fire.png?raw=true",
  "dest": "/tmp/downloads",
  "id": 0,
  "priority": 10,
  "status": "completed",
  "download_path": "/tmp/downloads/a-painting-of-a-fire.png",
  "job_started": "2023-12-04T05:34:41.742174",
  "job_ended": "2023-12-04T05:34:42.592035",
  "bytes": 666734,
  "total_bytes": 666734
}

{
  "source": "https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/birdhouse.png?raw=true",
  "dest": "/tmp/downloads",
  "id": 1,
  "priority": 10,
  "status": "completed",
  "download_path": "/tmp/downloads/birdhouse.png",
  "job_started": "2023-12-04T05:34:41.741975",
  "job_ended": "2023-12-04T05:34:42.652841",
  "bytes": 774949,
  "total_bytes": 774949
}

{
  "source": "https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/missing.png",
  "dest": "/tmp/downloads",
  "id": 2,
  "priority": 10,
  "status": "error",
  "job_started": "2023-12-04T05:34:41.742079",
  "job_ended": "2023-12-04T05:34:42.147625",
  "bytes": 0,
  "total_bytes": 0,
  "error_type": "HTTPError(Not Found)",
  "error": "Traceback (most recent call last):\n File \"/home/lstein/Projects/InvokeAI/invokeai/app/services/download/download_default.py\", line 182, in _download_next_item\n self._do_download(job)\n File \"/home/lstein/Projects/InvokeAI/invokeai/app/services/download/download_default.py\", line 206, in _do_download\n raise HTTPError(resp.reason)\nrequests.exceptions.HTTPError: Not Found\n"
}

{
  "source": "https://civitai.com/api/download/models/152309?type=Model&format=SafeTensor",
  "dest": "/tmp/downloads",
  "id": 3,
  "priority": 10,
  "status": "completed",
  "download_path": "/tmp/downloads/xl_more_art-full_v1.safetensors",
  "job_started": "2023-12-04T05:34:42.147645",
  "job_ended": "2023-12-04T05:34:43.735990",
  "bytes": 719020768,
  "total_bytes": 719020768
}
```

## The API

The default download queue is `DownloadQueueService`, an
implementation of ABC `DownloadQueueServiceBase`. It juggles multiple
background download requests and provides facilities for interrogating
and cancelling the requests. Access to a current or past download task
is mediated via `DownloadJob` objects, which report the current status
of a job request.

### The Queue Object

A default download queue is located in
`ApiDependencies.invoker.services.download_queue`. However, you can
create additional instances if you need to isolate your queue from the
main one.

```
queue = DownloadQueueService(event_bus=events)
```

`DownloadQueueService()` takes three optional arguments:

| **Argument** | **Type** | **Default** | **Description** |
|----------------|-----------------|---------------|-----------------|
| `max_parallel_dl` | int | 5 | Maximum number of simultaneous downloads allowed |
| `event_bus` | EventServiceBase | None | System-wide FastAPI event bus for reporting download events |
| `requests_session` | requests.sessions.Session | None | An alternative requests Session object to use for the download |

`max_parallel_dl` specifies how many download jobs are allowed to run
simultaneously. Each will run in a different thread of execution.

`event_bus` is an EventServiceBase, typically the one created at
InvokeAI startup. If present, download events are periodically emitted
on this bus to allow clients to follow download progress.

`requests_session` is a url library requests Session object. It is
used for testing.
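As an illustrative sketch (not part of the original document), a queue configured for testing might inject its own Session object and a lower concurrency limit, using only the constructor arguments listed above; the session here is a plain placeholder:

```
import requests
from invokeai.app.services.download import DownloadQueueService

session = requests.Session()  # e.g. primed with mocked adapters in a test

# Two parallel workers and a custom session; event_bus is omitted,
# so no events are emitted on the InvokeAI bus.
queue = DownloadQueueService(max_parallel_dl=2, requests_session=session)
```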

### The Job object

The queue operates on a series of download job objects. These objects
specify the source and destination of the download, and keep track of
the progress of the download.

The only job type currently implemented is `DownloadJob`, a pydantic object with the
following fields:

| **Field** | **Type** | **Default** | **Description** |
|----------------|-----------------|---------------|-----------------|
| _Fields passed in at job creation time_ |
| `source` | AnyHttpUrl | | Where to download from |
| `dest` | Path | | Where to download to |
| `access_token` | str | | [optional] string containing authentication token for access |
| `on_start` | Callable | | [optional] callback when the download starts |
| `on_progress` | Callable | | [optional] callback called at intervals during download progress |
| `on_complete` | Callable | | [optional] callback called after successful download completion |
| `on_error` | Callable | | [optional] callback called after an error occurs |
| `id` | int | auto assigned | Job ID, an integer >= 0 |
| `priority` | int | 10 | Job priority. Lower priorities run before higher priorities |
| _Fields updated over the course of the download task_ |
| `status` | DownloadJobStatus| | Status code |
| `download_path` | Path | | Path to the location of the downloaded file |
| `job_started` | float | | Timestamp for when the job started running |
| `job_ended` | float | | Timestamp for when the job completed or errored out |
| `job_sequence` | int | | A counter that is incremented each time a model is dequeued |
| `bytes` | int | 0 | Bytes downloaded so far |
| `total_bytes` | int | 0 | Total size of the file at the remote site |
| `error_type` | str | | String version of the exception that caused an error during download |
| `error` | str | | String version of the traceback associated with an error |
| `cancelled` | bool | False | Set to true if the job was cancelled by the caller |

When you create a job, you can assign it a `priority`. If multiple
jobs are queued, the job with the lowest priority runs first.

Every job has a `source` and a `dest`. `source` is a pydantic.networks AnyHttpUrl object.
The `dest` is a path on the local filesystem that specifies the
destination for the downloaded object. Its semantics are
described below.

When the job is submitted, it is assigned a numeric `id`. The id can
then be used to fetch the job object from the queue.
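As a quick illustration (a sketch, not from the original document; the URLs are placeholders), priorities and ids work together like this:

```
# default priority is 10; a higher number runs later
urgent = download_queue.download(source='https://example.com/model.safetensors',
                                 dest='/tmp/downloads')
later = download_queue.download(source='https://example.com/extras.zip',
                                dest='/tmp/downloads',
                                priority=20)

# the id assigned at submission time can be used to look the job up again
assert download_queue.id_to_job(later.id).id == later.id
```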

The `status` field is updated by the queue to indicate where the job
is in its lifecycle. Values are defined in the string enum
`DownloadJobStatus`, a symbol available from
`invokeai.app.services.download_manager`. Possible values are:

| **Value** | **String Value** | **Description** |
|--------------|---------------------|-------------------|
| `WAITING` | waiting | Job is on the queue but not yet running |
| `RUNNING` | running | The download is started |
| `COMPLETED` | completed | Job has finished its work without an error |
| `ERROR` | error | Job encountered an error and will not run again |

`job_started` and `job_ended` indicate when the job
was started (using a python timestamp) and when it completed.

In case of an error, the job's status will be set to `DownloadJobStatus.ERROR`, the text of the
Exception that caused the error will be placed in the `error_type`
field and the traceback that led to the error will be in `error`.

A cancelled job will have status `DownloadJobStatus.ERROR` and an
`error_type` field of "DownloadJobCancelledException". In addition,
the job's `cancelled` property will be set to True.

### Callbacks

Download jobs can be associated with a series of callbacks, each with
the signature `Callable[["DownloadJob"], None]`. The callbacks are assigned
using the optional arguments `on_start`, `on_progress`, `on_complete` and
`on_error`. When the corresponding event occurs, the callback will be
invoked and passed the job. The callback will be run in a `try:`
context in the same thread as the download job. Any exceptions that
occur during execution of the callback will be caught and converted
into a log error message, thereby allowing the download to continue.
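For illustration (a sketch, not part of the original document; the URL is a placeholder), callbacks matching that signature might look like:

```
def log_progress(job):
    # bytes and total_bytes are updated by the queue as the download runs
    if job.total_bytes:
        print(f"{job.source}: {100 * job.bytes / job.total_bytes:.1f}% complete")

def log_error(job):
    # error_type and error are filled in when the job fails
    print(f"{job.source} failed: {job.error_type}")

download_queue.download(source='https://example.com/some_file',
                        dest='/tmp/downloads',
                        on_progress=log_progress,
                        on_error=log_error)
```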

#### `TqdmProgress`

The `invokeai.app.services.download.download_default` module defines a
class named `TqdmProgress` which can be used as an `on_progress`
handler to display a completion bar in the console. Use as follows:

```
from invokeai.app.services.download import TqdmProgress

download_queue.download(source='http://some.server.somewhere/some_file',
                        dest='/tmp/downloads',
                        on_progress=TqdmProgress().update
                        )
```

### Events

If the queue was initialized with the InvokeAI event bus (the case
when using `ApiDependencies.invoker.services.download_queue`), then
download events will also be issued on the bus. The events are:

* `download_started` -- This is issued when a job is taken off the
  queue and a request is made to the remote server for the URL headers, but before any data
  has been downloaded. The event payload will contain the keys `source`
  and `download_path`. The latter contains the path that the URL will be
  downloaded to.

* `download_progress` -- This is issued periodically as the download
  runs. The payload contains the keys `source`, `download_path`,
  `current_bytes` and `total_bytes`. The latter two fields can be
  used to display the percent complete.

* `download_complete` -- This is issued when the download completes
  successfully. The payload contains the keys `source`, `download_path`
  and `total_bytes`.

* `download_error` -- This is issued when the download stops because
  of an error condition. The payload contains the fields `error_type`
  and `error`. The former is the text representation of the exception,
  and the latter is a traceback showing where the error occurred.

### Job control

To create a job call the queue's `download()` method. You can list all
jobs using `list_jobs()`, fetch a single job by its id with
`id_to_job()`, cancel a running job with `cancel_job()`, cancel all
running jobs with `cancel_all_jobs()`, and wait for all jobs to finish
with `join()`.

#### job = queue.download(source, dest, priority, access_token)

Create a new download job and put it on the queue, returning the
DownloadJob object.

#### jobs = queue.list_jobs()

Return a list of all active and inactive `DownloadJob`s.

#### job = queue.id_to_job(id)

Return the job corresponding to the given ID.

#### queue.prune_jobs()

Remove inactive (complete or errored) jobs from the listing returned
by `list_jobs()`.

#### queue.join()

Block until all pending jobs have run to completion or errored out.
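Putting those calls together, a brief sketch (not from the original document; the URL is a placeholder) of a typical control flow:

```
job = download_queue.download(source='https://example.com/big_model.safetensors',
                              dest='/tmp/downloads')

# ... later, if the file is no longer needed:
download_queue.cancel_job(job)

# wait for everything else to settle, then drop finished jobs from the listing
download_queue.join()
download_queue.prune_jobs()
for remaining in download_queue.list_jobs():
    print(remaining.status)
```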

@@ -15,13 +15,8 @@ model. These are the:
their metadata, and `ModelRecordServiceBase` to store that
information. It is also responsible for managing the InvokeAI
`models` directory and its contents.

* _ModelMetadataStore_ and _ModelMetaDataFetch_ Backend modules that
  are able to retrieve metadata from online model repositories,
  transform them into Pydantic models, and cache them to the InvokeAI
  SQL database.

* _DownloadQueueServiceBase_
* _DownloadQueueServiceBase_ (**CURRENTLY UNDER DEVELOPMENT - NOT IMPLEMENTED**)
  A multithreaded downloader responsible
  for downloading models from a remote source to disk. The download
  queue has special methods for downloading repo_id folders from

@@ -35,13 +30,13 @@ model. These are the:

## Location of the Code

The four main services can be found in
All four of these services can be found in
`invokeai/app/services` in the following directories:

* `invokeai/app/services/model_records/`
* `invokeai/app/services/model_install/`
* `invokeai/app/services/downloads/`
* `invokeai/app/services/model_loader/` (**under development**)
* `invokeai/app/services/downloads/` (**under development**)

Code related to the FastAPI web API can be found in
`invokeai/app/api/routers/model_records.py`.

@@ -407,18 +402,15 @@ functionality:
the download, installation and registration process.

- Downloading a model from an arbitrary URL and installing it in
  `models_dir`.
  `models_dir` (_implementation pending_).

- Special handling for Civitai model URLs which allow the user to
  paste in a model page's URL or download link
  paste in a model page's URL or download link (_implementation pending_).

- Special handling for HuggingFace repo_ids to recursively download
  the contents of the repository, paying attention to alternative
  variants such as fp16.

- Saving tags and other metadata about the model into the invokeai database
  when fetching from a repo that provides that type of information,
  (currently only Civitai and HuggingFace).
  variants such as fp16. (_implementation pending_)

### Initializing the installer

@@ -434,24 +426,16 @@ following initialization pattern:
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_records import ModelRecordServiceSQL
from invokeai.app.services.model_install import ModelInstallService
from invokeai.app.services.download import DownloadQueueService
from invokeai.app.services.shared.sqlite import SqliteDatabase
from invokeai.backend.util.logging import InvokeAILogger

config = InvokeAIAppConfig.get_config()
config.parse_args()

logger = InvokeAILogger.get_logger(config=config)
db = SqliteDatabase(config, logger)
record_store = ModelRecordServiceSQL(db)
queue = DownloadQueueService()
queue.start()

installer = ModelInstallService(app_config=config,
                                record_store=record_store,
                                download_queue=queue
                                )
installer.start()
store = ModelRecordServiceSQL(db)
installer = ModelInstallService(config, store)
```

The full form of `ModelInstallService()` takes the following
@@ -459,12 +443,9 @@ required parameters:

| **Argument** | **Type** | **Description** |
|------------------|------------------------------|------------------------------|
| `app_config` | InvokeAIAppConfig | InvokeAI app configuration object |
| `config` | InvokeAIAppConfig | InvokeAI app configuration object |
| `record_store` | ModelRecordServiceBase | Config record storage database |
| `download_queue` | DownloadQueueServiceBase | Download queue object |
| `metadata_store` | Optional[ModelMetadataStore] | Metadata storage object |
| `session` | Optional[requests.Session] | Swap in a different Session object (usually for debugging) |
| `event_bus` | EventServiceBase | Optional event bus to send download/install progress events to |

Once initialized, the installer will provide the following methods:

@@ -493,14 +474,14 @@ source7 = URLModelSource(url='https://civitai.com/api/download/models/63006', ac
for source in [source1, source2, source3, source4, source5, source6, source7]:
    install_job = installer.install_model(source)

source2job = installer.wait_for_installs(timeout=120)
source2job = installer.wait_for_installs()
for source in sources:
    job = source2job[source]
    if job.complete:
    if job.status == "completed":
        model_config = job.config_out
        model_key = model_config.key
        print(f"{source} installed as {model_key}")
    elif job.errored:
    elif job.status == "error":
        print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")

```

@@ -534,117 +515,43 @@ The full list of arguments to `import_model()` is as follows:

| **Argument** | **Type** | **Default** | **Description** |
|------------------|------------------------------|-------------|-------------------------------------------|
| `source` | ModelSource | None | The source of the model, Path, URL or repo_id |
| `source` | Union[str, Path, AnyHttpUrl] | | The source of the model, Path, URL or repo_id |
| `inplace` | bool | True | Leave a local model in its current location |
| `variant` | str | None | Desired variant, such as 'fp16' or 'onnx' (HuggingFace only) |
| `subfolder` | str | None | Repository subfolder (HuggingFace only) |
| `config` | Dict[str, Any] | None | Override all or a portion of model's probed attributes |
| `access_token` | str | None | Provide authorization information needed to download |

The next few sections describe the various types of ModelSource that
can be passed to `import_model()`.

The `inplace` field controls how local model Paths are handled. If
True (the default), then the model is simply registered in its current
location by the installer's `ModelConfigRecordService`. Otherwise, a
copy of the model is put into the location specified by the `models_dir`
application configuration parameter.

The `variant` field is used for HuggingFace repo_ids only. If
provided, the repo_id download handler will look for and download
tensors files that follow the convention for the selected variant:

- "fp16" will select files named "*model.fp16.{safetensors,bin}"
- "onnx" will select files ending with the suffix ".onnx"
- "openvino" will select files beginning with "openvino_model"

In the special case of the "fp16" variant, the installer will select
the 32-bit version of the files if the 16-bit version is unavailable.

`subfolder` is used for HuggingFace repo_ids only. If provided, the
model will be downloaded from the designated subfolder rather than the
top-level repository folder. If a subfolder is attached to the repo_id
using the format `repo_owner/repo_name:subfolder`, then the subfolder
specified by the repo_id will override the subfolder argument.

`config` can be used to override all or a portion of the configuration
attributes returned by the model prober. See the section below for
details.

#### LocalModelSource

This is used for a model that is located on a locally-accessible Posix
filesystem, such as a local disk or networked fileshare.

| **Argument** | **Type** | **Default** | **Description** |
|------------------|------------------------------|-------------|-------------------------------------------|
| `path` | str | Path | None | Path to the model file or directory |
| `inplace` | bool | False | If set, the model file(s) will be left in their location; otherwise they will be copied into the InvokeAI root's `models` directory |
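For example, a hedged sketch (not from the original document; the path is a placeholder, and it assumes `LocalModelSource` is importable alongside the installer service) of registering an existing local file without copying it:

```
from invokeai.app.services.model_install import LocalModelSource  # assumed import location

# register the file where it already lives rather than copying it into models_dir
source = LocalModelSource(path='/data/models/my-favorite-model.safetensors', inplace=True)
install_job = installer.import_model(source)
```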

#### URLModelSource

This is used for a single-file model that is accessible via a URL. The
fields are:

| **Argument** | **Type** | **Default** | **Description** |
|------------------|------------------------------|-------------|-------------------------------------------|
| `url` | AnyHttpUrl | None | The URL for the model file. |
| `access_token` | str | None | An access token needed to gain access to this file. |

The `AnyHttpUrl` class can be imported from `pydantic.networks`.

Ordinarily, no metadata is retrieved from these sources. However,
there is special-case code in the installer that looks for HuggingFace
and Civitai URLs and fetches the corresponding model metadata from
the corresponding repo.

#### CivitaiModelSource

This is used for a model that is hosted by the Civitai web site.

| **Argument** | **Type** | **Default** | **Description** |
|------------------|------------------------------|-------------|-------------------------------------------|
| `version_id` | int | None | The ID of the particular version of the desired model. |
| `access_token` | str | None | An access token needed to gain access to a subscriber's-only model. |

Civitai has two model IDs, both of which are integers. The `model_id`
corresponds to a collection of model versions that may differ in
arbitrary ways, such as derivation from different checkpoint training
steps, SFW vs NSFW generation, pruned vs non-pruned, etc. The
`version_id` points to a specific version. Please use the latter.

Some Civitai models require an access token to download. These can be
generated from the Civitai profile page of a logged-in
account. Somewhat annoyingly, if you fail to provide the access token
when downloading a model that needs it, Civitai generates a redirect
to a login page rather than a 403 Forbidden error. The installer
attempts to catch this event and issue an informative error
message. Otherwise you will get an "unrecognized model suffix" error
when the model prober tries to identify the type of the HTML login
page.

#### HFModelSource

HuggingFace has the most complicated `ModelSource` structure:

| **Argument** | **Type** | **Default** | **Description** |
|------------------|------------------------------|-------------|-------------------------------------------|
| `repo_id` | str | None | The ID of the desired model. |
| `variant` | ModelRepoVariant | ModelRepoVariant('fp16') | The desired variant. |
| `subfolder` | Path | None | Look for the model in a subfolder of the repo. |
| `access_token` | str | None | An access token needed to gain access to a subscriber's-only model. |

The `repo_id` is the repository ID, such as `stabilityai/sdxl-turbo`.

The `variant` is one of the various diffusers formats that HuggingFace
supports and is used to pick out, from the hodgepodge of files in
a typical HuggingFace repository, the particular components needed for
a complete diffusers model. `ModelRepoVariant` is an enum that can be
imported from `invokeai.backend.model_manager` and has the following
values:

| **Name** | **String Value** |
|----------------------------|---------------------------|
| ModelRepoVariant.DEFAULT | "default" |
| ModelRepoVariant.FP16 | "fp16" |
| ModelRepoVariant.FP32 | "fp32" |
| ModelRepoVariant.ONNX | "onnx" |
| ModelRepoVariant.OPENVINO | "openvino" |
| ModelRepoVariant.FLAX | "flax" |

You can also pass the string forms to `variant` directly. Note that
InvokeAI may not be able to load and run all variants. At the current
time, specifying `ModelRepoVariant.DEFAULT` will retrieve model files
that are unqualified, e.g. `pytorch_model.safetensors` rather than
`pytorch_model.fp16.safetensors`. These are usually the 32-bit
safetensors forms of the model.

If `subfolder` is specified, then the requested model resides in a
subfolder of the main model repository. This is typically used to
fetch and install VAEs.

Some models require you to be registered with HuggingFace and logged
in. To download these files, you must provide an
`access_token`. Internally, if no access token is provided, then
`HfFolder.get_token()` will be called to fill it in with the cached
one.

`access_token` is passed to the download queue and used to access
repositories that require it.
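To make these fields concrete, here is a hedged sketch (not part of the original document, and it assumes `HFModelSource` is importable alongside the other source classes) that fetches only a repository's `vae` subfolder in fp16 form:

```
from pathlib import Path

from invokeai.app.services.model_install import HFModelSource  # assumed import location
from invokeai.backend.model_manager import ModelRepoVariant

source = HFModelSource(
    repo_id='stabilityai/sdxl-turbo',
    variant=ModelRepoVariant.FP16,
    subfolder=Path('vae'),
)
install_job = installer.import_model(source)
```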

#### Monitoring the install job process

@@ -656,8 +563,7 @@ The `ModelInstallJob` class has the following structure:

| **Attribute** | **Type** | **Description** |
|----------------|-----------------|------------------|
| `id` | `int` | Integer ID for this job |
| `status` | `InstallStatus` | An enum of [`waiting`, `downloading`, `running`, `completed`, `error` and `cancelled`] |
| `status` | `InstallStatus` | An enum of ["waiting", "running", "completed" and "error"] |
| `config_in` | `dict` | Overriding configuration values provided by the caller |
| `config_out` | `AnyModelConfig`| After successful completion, contains the configuration record written to the database |
| `inplace` | `boolean` | True if the caller asked to install the model in place using its local path |

@@ -672,70 +578,30 @@ broadcast to the InvokeAI event bus. The events will appear on the bus
as an event of type `EventServiceBase.model_event`, a timestamp and
the following event names:

##### `model_install_downloading`
- `model_install_started`

For remote models only, `model_install_downloading` events will be issued at regular
intervals as the download progresses. The event's payload contains the
following keys:
The payload will contain the keys `timestamp` and `source`. The latter
indicates the requested model source for installation.

| **Key** | **Type** | **Description** |
|----------------|-----------|------------------|
| `source` | str | String representation of the requested source |
| `local_path` | str | String representation of the path to the downloading model (usually a temporary directory) |
| `bytes` | int | How many bytes downloaded so far |
| `total_bytes` | int | Total size of all the files that make up the model |
| `parts` | List[Dict]| Information on the progress of the individual files that make up the model |
- `model_install_progress`

Emitted at regular intervals when downloading a remote model, the
payload will contain the keys `timestamp`, `source`, `current_bytes`
and `total_bytes`. These events are _not_ emitted when a local model
already on the filesystem is imported.

`parts` is a list of dictionaries that give information on each of
the component pieces of the download. The dictionary's keys are
`source`, `local_path`, `bytes` and `total_bytes`, and correspond to
the like-named keys in the main event.
- `model_install_completed`

Note that downloading events will not be issued for local models, and
that downloading events occur *before* the running event.
Issued once at the end of a successful installation. The payload will
contain the keys `timestamp`, `source` and `key`, where `key` is the
ID under which the model has been registered.

##### `model_install_running`

`model_install_running` is issued when all the required downloads have completed (if applicable) and the
model probing, copying and registration process has now started.

The payload will contain the key `source`.

##### `model_install_completed`

`model_install_completed` is issued once at the end of a successful
installation. The payload will contain the keys `source`,
`total_bytes` and `key`, where `key` is the ID under which the model
has been registered.

##### `model_install_error`

`model_install_error` is emitted if the installation process fails for
some reason. The payload will contain the keys `source`, `error_type`
and `error`. `error_type` is a short message indicating the nature of
the error, and `error` is the long traceback to help debug the
problem.

##### `model_install_cancelled`

`model_install_cancelled` is issued if the model installation is
cancelled, or if one or more of its files' downloads are
cancelled. The payload will contain `source`.

##### Following the model status

You may poll the `ModelInstallJob` object returned by `import_model()`
to ascertain the state of the install. The job status can be read from
the job's `status` attribute, an `InstallStatus` enum which has the
enumerated values `WAITING`, `DOWNLOADING`, `RUNNING`, `COMPLETED`,
`ERROR` and `CANCELLED`.

For convenience, install jobs also provide the following boolean
properties: `waiting`, `downloading`, `running`, `complete`, `errored`
and `cancelled`, as well as `in_terminal_state`. The last will return
True if the job is in the complete, errored or cancelled states.
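A minimal polling sketch using those properties (illustrative only, not from the original document; the sleep interval is arbitrary):

```
import time

install_job = installer.import_model(source)  # source constructed as shown above
while not install_job.in_terminal_state:
    if install_job.downloading:
        print("still downloading...")
    time.sleep(1)

if install_job.complete:
    print(f"registered as {install_job.config_out.key}")
elif install_job.errored:
    print(f"failed: {install_job.error_type}")
```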

- `model_install_error`

Emitted if the installation process fails for some reason. The payload
will contain the keys `timestamp`, `source`, `error_type` and
`error`. `error_type` is a short message indicating the nature of the
error, and `error` is the long traceback to help debug the problem.

#### Model configuration and probing

@@ -755,9 +621,17 @@ overriding values for any of the model's configuration
attributes. Here is an example of setting the
`SchedulerPredictionType` and `name` for an sd-2 model:

This is typically used to set
the model's name and description, but can also be used to overcome
cases in which automatic probing is unable to (correctly) determine
the model's attributes. The most common situation is the
`prediction_type` field for sd-2 (and rare sd-1) models. Here is an
example of how it works:

```
install_job = installer.import_model(
    source=HFModelSource(repo_id='stabilityai/stable-diffusion-2-1', variant='fp32'),
    source='stabilityai/stable-diffusion-2-1',
    variant='fp16',
    config=dict(
        prediction_type=SchedulerPredictionType('v_prediction')
        name='stable diffusion 2 base model',

@@ -769,38 +643,29 @@ install_job = installer.import_model(

This section describes additional methods provided by the installer class.

#### jobs = installer.wait_for_installs([timeout])
#### jobs = installer.wait_for_installs()

Block until all pending installs are completed or errored and then
returns a list of completed jobs. The optional `timeout` argument will
return from the call if jobs aren't completed in the specified
time. An argument of 0 (the default) will block indefinitely.
returns a list of completed jobs.

#### jobs = installer.list_jobs()
#### jobs = installer.list_jobs([source])

Return a list of all active and complete `ModelInstallJobs`.
Return a list of all active and complete `ModelInstallJobs`. An
optional `source` argument allows you to filter the returned list by a
model source string pattern using a partial string match.

#### jobs = installer.get_job_by_source(source)
#### jobs = installer.get_job(source)

Return a list of `ModelInstallJob` corresponding to the indicated
model source.

#### jobs = installer.get_job_by_id(id)

Return a list of `ModelInstallJob` corresponding to the indicated
model id.

#### jobs = installer.cancel_job(job)

Cancel the indicated job.

#### installer.prune_jobs

Remove jobs that are in a terminal state (i.e. complete, errored or
cancelled) from the job list returned by `list_jobs()` and
`get_job()`.
Remove non-pending jobs (completed or errored) from the job list
returned by `list_jobs()` and `get_job()`.
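As a hedged sketch combining the calls above (assuming the newer signatures with a `timeout` argument and `get_job_by_source()`):

```
installer.wait_for_installs(timeout=300)

# inspect what happened to a particular source, then tidy up the job list
for job in installer.get_job_by_source('stabilityai/sdxl-turbo'):
    print(job.id, job.status)

installer.prune_jobs()
```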

#### installer.app_config, installer.record_store, installer.event_bus
#### installer.app_config, installer.record_store,
installer.event_bus

Properties that provide access to the installer's `InvokeAIAppConfig`,
`ModelRecordServiceBase` and `EventServiceBase` objects.

@@ -861,6 +726,120 @@ the API starts up. Its effect is to call `sync_to_config()` to
synchronize the model record store database with what's currently on
disk.

# The remainder of this documentation is provisional, pending implementation of the Download and Load services

## Let's get loaded, the lowdown on ModelLoadService

The `ModelLoadService` is responsible for loading a named model into
memory so that it can be used for inference. Despite the fact that it
does a lot under the covers, it is very straightforward to use.

An application-wide model loader is created at API initialization time
and stored in
`ApiDependencies.invoker.services.model_loader`. However, you can
create alternative instances if you wish.

### Creating a ModelLoadService object

The class is defined in
`invokeai.app.services.model_loader_service`. It is initialized with
an InvokeAIAppConfig object, from which it gets configuration
information such as the user's desired GPU and precision, and with a
previously-created `ModelRecordServiceBase` object, from which it
loads the requested model's configuration information.

Here is a typical initialization pattern:

```
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.model_record_service import ModelRecordServiceBase
from invokeai.app.services.model_loader_service import ModelLoadService

config = InvokeAIAppConfig.get_config()
store = ModelRecordServiceBase.open(config)
loader = ModelLoadService(config, store)
```

Note that we are relying on the contents of the application
configuration to choose the implementation of
`ModelRecordServiceBase`.

### get_model(key, [submodel_type], [context]) -> ModelInfo:

*** TO DO: change to get_model(key, context=None, **kwargs)

The `get_model()` method, like its similarly-named cousin in
`ModelRecordService`, receives the unique key that identifies the
model. It loads the model into memory, gets the model ready for use,
and returns a `ModelInfo` object.

The optional second argument, `subtype`, is a `SubModelType` string
enum, such as "vae". It is mandatory when used with a main model, and
is used to select which part of the main model to load.

The optional third argument, `context`, can be provided by
an invocation to trigger model load event reporting. See below for
details.

The returned `ModelInfo` object shares some fields in common with
`ModelConfigBase`, but is otherwise a completely different beast:

| **Field Name** | **Type** | **Description** |
|----------------|-----------------|------------------|
| `key` | str | The model key derived from the ModelRecordService database |
| `name` | str | Name of this model |
| `base_model` | BaseModelType | Base model for this model |
| `type` | ModelType or SubModelType | Either the model type (non-main) or the submodel type (main models) |
| `location` | Path or str | Location of the model on the filesystem |
| `precision` | torch.dtype | The torch.precision to use for inference |
| `context` | ModelCache.ModelLocker | A context class used to lock the model in VRAM while in use |

The types for `ModelInfo` and `SubModelType` can be imported from
`invokeai.app.services.model_loader_service`.

To use the model, you use the `ModelInfo` as a context manager using
the following pattern:

```
model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
with model_info as vae:
    image = vae.decode(latents)[0]
```

The `vae` model will stay locked in the GPU during the period of time
it is in the context manager's scope.

`get_model()` may raise any of the following exceptions:

- `UnknownModelException` -- key not in database
- `ModelNotFoundException` -- key in database but model not found at path
- `InvalidModelException` -- the model is guilty of a variety of sins

** TO DO: ** Resolve discrepancy between ModelInfo.location and
ModelConfig.path.

### Emitting model loading events

When the `context` argument is passed to `get_model()`, it will
retrieve the invocation event bus from the passed `InvocationContext`
object to emit events on the invocation bus. The two events are
"model_load_started" and "model_load_completed". Both carry the
following payload:

```
payload=dict(
    queue_id=queue_id,
    queue_item_id=queue_item_id,
    queue_batch_id=queue_batch_id,
    graph_execution_state_id=graph_execution_state_id,
    model_key=model_key,
    submodel=submodel,
    hash=model_info.hash,
    location=str(model_info.location),
    precision=str(model_info.precision),
)
```

***

## Get on line: The Download Queue

@@ -900,6 +879,7 @@ following fields:
| `job_started` | float | | Timestamp for when the job started running |
| `job_ended` | float | | Timestamp for when the job completed or errored out |
| `job_sequence` | int | | A counter that is incremented each time a model is dequeued |
| `preserve_partial_downloads`| bool | False | Resume partial downloads when relaunched. |
| `error` | Exception | | A copy of the Exception that caused an error during download |

When you create a job, you can assign it a `priority`. If multiple

@@ -1204,362 +1184,3 @@ other resources that it might have been using.
This will start/pause/cancel all jobs that have been submitted to the
queue and have not yet reached a terminal state.

***
|
||||
|
||||
## This Meta be Good: Model Metadata Storage
|
||||
|
||||
The modules found under `invokeai.backend.model_manager.metadata`
|
||||
provide a straightforward API for fetching model metadata from online
|
||||
repositories. Currently two repositories are supported: HuggingFace
|
||||
and Civitai. However, the modules are easily extended for additional
|
||||
repos, provided that they have defined APIs for metadata access.
|
||||
|
||||
Metadata comprises any descriptive information that is not essential
|
||||
for getting the model to run. For example, "author" is metadata, while
|
||||
"type", "base" and "format" are not. The latter fields are part of the
|
||||
model's config, as defined in `invokeai.backend.model_manager.config`.
|
||||
|
||||
### Example Usage:
|
||||
|
||||
```
|
||||
from invokeai.backend.model_manager.metadata import (
|
||||
AnyModelRepoMetadata,
|
||||
CivitaiMetadataFetch,
|
||||
    CivitaiMetadata,
|
||||
ModelMetadataStore,
|
||||
)
|
||||
# to access the initialized sql database
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
|
||||
civitai = CivitaiMetadataFetch()
|
||||
|
||||
# fetch the metadata
|
||||
model_metadata = civitai.from_url("https://civitai.com/models/215796")
|
||||
|
||||
# get some common metadata fields
|
||||
author = model_metadata.author
|
||||
tags = model_metadata.tags
|
||||
|
||||
# get some Civitai-specific fields
|
||||
assert isinstance(model_metadata, CivitaiMetadata)
|
||||
|
||||
trained_words = model_metadata.trained_words
|
||||
base_model = model_metadata.base_model_trained_on
|
||||
thumbnail = model_metadata.thumbnail_url
|
||||
|
||||
# cache the metadata to the database using the key corresponding to
|
||||
# an existing model config record in the `model_config` table
|
||||
sql_cache = ModelMetadataStore(ApiDependencies.invoker.services.db)
|
||||
sql_cache.add_metadata('fb237ace520b6716adc98bcb16e8462c', model_metadata)
|
||||
|
||||
# now we can search the database by tag, author or model name
|
||||
# matches will contain a list of model keys that match the search
|
||||
matches = sql_cache.search_by_tag({"tool", "turbo"})
|
||||
```
|
||||
|
||||
### Structure of the Metadata objects
|
||||
|
||||
There is a short class hierarchy of Metadata objects, all of which
|
||||
descend from the Pydantic `BaseModel`.
|
||||
|
||||
#### `ModelMetadataBase`
|
||||
|
||||
This is the common base class for metadata:
|
||||
|
||||
| **Field Name** | **Type** | **Description** |
|
||||
|----------------|-----------------|------------------|
|
||||
| `name` | str | Repository's name for the model |
|
||||
| `author` | str | Model's author |
|
||||
| `tags` | Set[str] | Model tags |
|
||||
|
||||
|
||||
Note that the model config record also has a `name` field. It is
|
||||
intended that the config record version be locally customizable, while
|
||||
the metadata version is read-only. However, enforcing this is expected
|
||||
to be part of the business logic.
|
||||
|
||||
Descendants of the base add additional fields.
|
||||
|
||||
#### `HuggingFaceMetadata`
|
||||
|
||||
This descends from `ModelMetadataBase` and adds the following fields:
|
||||
|
||||
| **Field Name** | **Type** | **Description** |
|
||||
|----------------|-----------------|------------------|
|
||||
| `type` | Literal["huggingface"] | Used for the discriminated union of metadata classes|
|
||||
| `id` | str | HuggingFace repo_id |
|
||||
| `tag_dict` | Dict[str, Any] | A dictionary of tag/value pairs provided in addition to `tags` |
|
||||
| `last_modified`| datetime | Date of last commit of this model to the repo |
|
||||
| `files` | List[Path] | List of the files in the model repo |
|
||||
|
||||
|
||||
#### `CivitaiMetadata`
|
||||
|
||||
This descends from `ModelMetadataBase` and adds the following fields:
|
||||
|
||||
| **Field Name** | **Type** | **Description** |
|
||||
|----------------|-----------------|------------------|
|
||||
| `type` | Literal["civitai"] | Used for the discriminated union of metadata classes|
|
||||
| `id` | int | Civitai model id |
|
||||
| `version_name` | str | Name of this version of the model (distinct from model name) |
|
||||
| `version_id` | int | Civitai model version id (distinct from model id) |
|
||||
| `created` | datetime | Date this version of the model was created |
|
||||
| `updated` | datetime | Date this version of the model was last updated |
|
||||
| `published` | datetime | Date this version of the model was published to Civitai |
|
||||
| `description` | str | Model description. Quite verbose and contains HTML tags |
|
||||
| `version_description` | str | Model version description, usually describes changes to the model |
|
||||
| `nsfw` | bool | Whether the model tends to generate NSFW content |
|
||||
| `restrictions` | LicenseRestrictions | An object that describes what is and isn't allowed with this model |
|
||||
| `trained_words`| Set[str] | Trigger words for this model, if any |
|
||||
| `download_url` | AnyHttpUrl | URL for downloading this version of the model |
|
||||
| `base_model_trained_on` | str | Name of the model that this version was trained on |
|
||||
| `thumbnail_url` | AnyHttpUrl | URL to access a representative thumbnail image of the model's output |
|
||||
| `weight_min` | int | For LoRA sliders, the minimum suggested weight to apply |
|
||||
| `weight_max` | int | For LoRA sliders, the maximum suggested weight to apply |
|
||||
|
||||
Note that `weight_min` and `weight_max` are not currently populated
|
||||
and take the default values of (-1.0, +2.0). The issue is that these
|
||||
values aren't part of the structured data but appear in the text
|
||||
description. Regular-expression or LLM-based parsing might be able to
extract these values.
|
||||
|
||||
Also be aware that `base_model_trained_on` is free text and doesn't
|
||||
correspond to our `ModelType` enum.
|
||||
|
||||
`CivitaiMetadata` also defines some convenience properties relating to
|
||||
licensing restrictions: `credit_required`, `allow_commercial_use`,
|
||||
`allow_derivatives` and `allow_different_license`.
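
As an illustrative sketch (assuming these convenience properties evaluate as booleans), a caller might check licensing terms before reusing the `model_metadata` object fetched in the earlier example:

```
# check licensing terms before redistributing output or derivatives (sketch)
if not model_metadata.allow_commercial_use:
    print("This model may not be used commercially")
if model_metadata.credit_required:
    print(f"Please credit {model_metadata.author} when sharing results")
```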
|
||||
|
||||
#### `AnyModelRepoMetadata`
|
||||
|
||||
This is a discriminated Union of `CivitaiMetadata` and
|
||||
`HuggingFaceMetadata`.
|
||||
|
||||
### Fetching Metadata from Online Repos
|
||||
|
||||
The `HuggingFaceMetadataFetch` and `CivitaiMetadataFetch` classes will
|
||||
retrieve metadata from their corresponding repositories and return
|
||||
`AnyModelRepoMetadata` objects. Their base class
|
||||
`ModelMetadataFetchBase` is an abstract class that defines two
|
||||
methods: `from_url()` and `from_id()`. The former accepts the type of
|
||||
model URLs that the user will try to cut and paste into the model
|
||||
import form. The latter accepts a string ID in the format recognized
|
||||
by the repository of choice. Both methods return an
|
||||
`AnyModelRepoMetadata`.
|
||||
|
||||
The base class also has a class method `from_json()` which will take
|
||||
the JSON representation of a `ModelMetadata` object, validate it, and
|
||||
return the corresponding `AnyModelRepoMetadata` object.
|
||||
|
||||
When initializing one of the metadata fetching classes, you may
|
||||
provide a `requests.Session` argument. This allows you to customize
|
||||
the low-level HTTP fetch requests and is used, for instance, in the
|
||||
testing suite to avoid hitting the internet.
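
A minimal sketch of this interface, reusing the classes described above (the repo ID, URL, and the Pydantic v2 serialization call are assumptions for illustration only):

```
from requests import Session

from invokeai.backend.model_manager.metadata import (
    CivitaiMetadataFetch,
    HuggingFaceMetadataFetch,
)

# a custom Session lets you control the low-level HTTP behaviour (e.g. mock it in tests)
session = Session()

# from_id() takes the repository's native ID format...
hf_metadata = HuggingFaceMetadataFetch(session).from_id("stabilityai/stable-diffusion-2-1")

# ...while from_url() takes the kind of URL a user would paste into the import form
civitai = CivitaiMetadataFetch(session)
civitai_metadata = civitai.from_url("https://civitai.com/models/215796")

# round-trip through JSON with the base-class helper
# (assumes Pydantic v2's model_dump_json(); adjust for your Pydantic version)
restored = civitai.from_json(civitai_metadata.model_dump_json())
```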
|
||||
|
||||
The HuggingFace and Civitai fetcher subclasses add additional
|
||||
repo-specific fetching methods:
|
||||
|
||||
|
||||
#### HuggingFaceMetadataFetch
|
||||
|
||||
This overrides its base class `from_json()` method to return a
|
||||
`HuggingFaceMetadata` object directly.
|
||||
|
||||
#### CivitaiMetadataFetch
|
||||
|
||||
This adds the following methods:
|
||||
|
||||
`from_civitai_modelid()` This takes the ID of a model, finds the
|
||||
default version of the model, and then retrieves the metadata for
|
||||
that version, returning a `CivitaiMetadata` object directly.
|
||||
|
||||
`from_civitai_versionid()` This takes the ID of a model version and
|
||||
retrieves its metadata. Functionally equivalent to `from_id()`, the
|
||||
only difference is that it returns a `CivitaiMetadata` object rather
|
||||
than an `AnyModelRepoMetadata`.
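
For example (both IDs below are illustrative placeholders):

```
civitai = CivitaiMetadataFetch()

# fetch metadata for the default version of a model, by model ID
metadata = civitai.from_civitai_modelid(215796)

# fetch metadata for one specific version, by version ID
metadata = civitai.from_civitai_versionid(243001)
```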
|
||||
|
||||
|
||||
### Metadata Storage
|
||||
|
||||
The `ModelMetadataStore` provides a simple facility to store model
|
||||
metadata in the `invokeai.db` database. The data is stored as a JSON
|
||||
blob, with a few common fields (`name`, `author`, `tags`) broken out
|
||||
to be searchable.
|
||||
|
||||
When a metadata object is saved to the database, it is identified
|
||||
using the model key, _and this key must correspond to an existing
|
||||
model key in the model_config table_. There is a foreign key integrity
|
||||
constraint between the `model_config.id` field and the
|
||||
`model_metadata.id` field such that if you attempt to save metadata
|
||||
under an unknown key, the attempt will result in an
|
||||
`UnknownModelException`. Likewise, when a model is deleted from
|
||||
`model_config`, the deletion of the corresponding metadata record will
|
||||
be triggered.
|
||||
|
||||
Tags are stored in a normalized fashion in the tables `model_tags` and
|
||||
`tags`. Triggers keep the tag table in sync with the `model_metadata`
|
||||
table.
|
||||
|
||||
To create the storage object, initialize it with the InvokeAI
|
||||
`SqliteDatabase` object. This is often done this way:
|
||||
|
||||
```
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
metadata_store = ModelMetadataStore(ApiDependencies.invoker.services.db)
|
||||
```
|
||||
|
||||
You can then access the storage with the following methods:
|
||||
|
||||
#### `add_metadata(key, metadata)`
|
||||
|
||||
Add the metadata using a previously-defined model key.
|
||||
|
||||
There is currently no `delete_metadata()` method. The metadata will
|
||||
persist until the matching config is deleted from the `model_config`
|
||||
table.
|
||||
|
||||
#### `get_metadata(key) -> AnyModelRepoMetadata`
|
||||
|
||||
Retrieve the metadata corresponding to the model key.
|
||||
|
||||
#### `update_metadata(key, new_metadata)`
|
||||
|
||||
Update an existing metadata record with new metadata.
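
For instance, a cached record could be refreshed with freshly fetched metadata. This sketch reuses the `civitai` fetcher and the model key from the earlier example, and assumes the Civitai `id` field round-trips through `from_id()`:

```
key = 'fb237ace520b6716adc98bcb16e8462c'
current = metadata_store.get_metadata(key)

# re-fetch from the repository and overwrite the cached copy
fresh = civitai.from_id(str(current.id))
metadata_store.update_metadata(key, fresh)
```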
|
||||
|
||||
#### `search_by_tag(tags: Set[str]) -> Set[str]`
|
||||
|
||||
Given a set of tags, find models that are tagged with them. If
|
||||
multiple tags are provided then a matching model must be tagged with
|
||||
*all* the tags in the set. This method returns a set of model keys and
|
||||
is intended to be used in conjunction with the `ModelRecordService`:
|
||||
|
||||
```
|
||||
model_config_store = ApiDependencies.invoker.services.model_records
|
||||
matches = metadata_store.search_by_tag({'license:other'})
|
||||
models = [model_config_store.get(x) for x in matches]
|
||||
```
|
||||
|
||||
#### `search_by_name(name: str) -> Set[str]`
|
||||
|
||||
Find all model metadata records that have the given name and return a
|
||||
set of keys to the corresponding model config objects.
|
||||
|
||||
#### `search_by_author(author: str) -> Set[str]`
|
||||
|
||||
Find all model metadata records that have the given author and return
|
||||
a set of keys to the corresponding model config objects.
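
These behave just like `search_by_tag()`; the author and name strings below are illustrative:

```
keys = metadata_store.search_by_author('InvokeAI')
models = [model_config_store.get(x) for x in keys]

keys = metadata_store.search_by_name('stable-diffusion-v1-5')
```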
|
||||
|
||||
# The remainder of this documentation is provisional, pending implementation of the Load service
|
||||
|
||||
## Let's get loaded, the lowdown on ModelLoadService
|
||||
|
||||
The `ModelLoadService` is responsible for loading a named model into
|
||||
memory so that it can be used for inference. Although it does a lot
under the covers, it is straightforward to use.
|
||||
|
||||
An application-wide model loader is created at API initialization time
|
||||
and stored in
|
||||
`ApiDependencies.invoker.services.model_loader`. However, you can
|
||||
create alternative instances if you wish.
|
||||
|
||||
### Creating a ModelLoadService object
|
||||
|
||||
The class is defined in
|
||||
`invokeai.app.services.model_loader_service`. It is initialized with
|
||||
an InvokeAIAppConfig object, from which it gets configuration
|
||||
information such as the user's desired GPU and precision, and with a
|
||||
previously-created `ModelRecordServiceBase` object, from which it
|
||||
loads the requested model's configuration information.
|
||||
|
||||
Here is a typical initialization pattern:
|
||||
|
||||
```
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_record_service import ModelRecordServiceBase
|
||||
from invokeai.app.services.model_loader_service import ModelLoadService
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
store = ModelRecordServiceBase.open(config)
|
||||
loader = ModelLoadService(config, store)
|
||||
```
|
||||
|
||||
Note that we are relying on the contents of the application
|
||||
configuration to choose the implementation of
|
||||
`ModelRecordServiceBase`.
|
||||
|
||||
### get_model(key, [submodel_type], [context]) -> ModelInfo:
|
||||
|
||||
*** TO DO: change to `get_model(key, context=None, **kwargs)`
|
||||
|
||||
The `get_model()` method, like its similarly-named cousin in
|
||||
`ModelRecordService`, receives the unique key that identifies the
|
||||
model. It loads the model into memory, gets the model ready for use,
|
||||
and returns a `ModelInfo` object.
|
||||
|
||||
The optional second argument, `subtype`, is a `SubModelType` string
|
||||
enum, such as "vae". It is mandatory when used with a main model, and
|
||||
is used to select which part of the main model to load.
|
||||
|
||||
The optional third argument, `context`, can be provided by
|
||||
an invocation to trigger model load event reporting. See below for
|
||||
details.
|
||||
|
||||
The returned `ModelInfo` object shares some fields in common with
|
||||
`ModelConfigBase`, but is otherwise a completely different beast:
|
||||
|
||||
| **Field Name** | **Type** | **Description** |
|
||||
|----------------|-----------------|------------------|
|
||||
| `key` | str | The model key derived from the ModelRecordService database |
|
||||
| `name` | str | Name of this model |
|
||||
| `base_model` | BaseModelType | Base model for this model |
|
||||
| `type` | ModelType or SubModelType | Either the model type (non-main) or the submodel type (main models)|
|
||||
| `location` | Path or str | Location of the model on the filesystem |
|
||||
| `precision` | torch.dtype | The torch.precision to use for inference |
|
||||
| `context` | ModelCache.ModelLocker | A context class used to lock the model in VRAM while in use |
|
||||
|
||||
The types for `ModelInfo` and `SubModelType` can be imported from
|
||||
`invokeai.app.services.model_loader_service`.
|
||||
|
||||
To use the model, you use the `ModelInfo` as a context manager using
|
||||
the following pattern:
|
||||
|
||||
```
|
||||
model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
|
||||
with model_info as vae:
|
||||
image = vae.decode(latents)[0]
|
||||
```
|
||||
|
||||
The `vae` model will stay locked in the GPU for as long as it is
within the context manager's scope.
|
||||
|
||||
`get_model()` may raise any of the following exceptions:
|
||||
|
||||
- `UnknownModelException` -- key not in database
|
||||
- `ModelNotFoundException` -- key in database but model not found at path
|
||||
- `InvalidModelException` -- the model is guilty of a variety of sins
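
A caller might guard against these conditions as sketched below; the import path for the exception classes is an assumption and may differ from where they are actually defined:

```
import logging

# assumed import location -- adjust to wherever these exceptions live
from invokeai.app.services.model_loader_service import (
    UnknownModelException,
    ModelNotFoundException,
    InvalidModelException,
)

logger = logging.getLogger(__name__)

try:
    model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
except UnknownModelException:
    logger.error("key is not in the model record database")
except ModelNotFoundException:
    logger.error("a record exists, but no model was found at its path")
except InvalidModelException:
    logger.error("the model failed validation and cannot be loaded")
```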
|
||||
|
||||
**TO DO:** Resolve discrepancy between ModelInfo.location and
|
||||
ModelConfig.path.
|
||||
|
||||
### Emitting model loading events
|
||||
|
||||
When the `context` argument is passed to `get_model()`, it will
|
||||
retrieve the invocation event bus from the passed `InvocationContext`
|
||||
object to emit events on the invocation bus. The two events are
|
||||
"model_load_started" and "model_load_completed". Both carry the
|
||||
following payload:
|
||||
|
||||
```
|
||||
payload=dict(
|
||||
queue_id=queue_id,
|
||||
queue_item_id=queue_item_id,
|
||||
queue_batch_id=queue_batch_id,
|
||||
graph_execution_state_id=graph_execution_state_id,
|
||||
model_key=model_key,
|
||||
submodel=submodel,
|
||||
hash=model_info.hash,
|
||||
location=str(model_info.location),
|
||||
precision=str(model_info.precision),
|
||||
)
|
||||
```
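
A sketch of how an invocation might opt in to these events; the attribute path used to reach the application-wide loader is an assumption and may differ in practice:

```
# inside an invocation's invoke() method (sketch)
loader = context.services.model_loader  # assumed attribute path to the shared loader
model_info = loader.get_model(
    'f13dd932c0c35c22dcb8d6cda4203764',  # illustrative model key
    SubModelType('vae'),
    context,  # passing the InvocationContext is what triggers the two events above
)
```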
|
||||
|
||||
|
@ -0,0 +1,75 @@
|
||||
# Contributing to the Frontend
|
||||
|
||||
# InvokeAI Web UI
|
||||
|
||||
- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
|
||||
- [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
|
||||
- [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
|
||||
- [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
|
||||
- [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)
|
||||
|
||||
The UI is a fairly straightforward TypeScript React app, with the Unified Canvas being more complex.
|
||||
|
||||
Code is located in `invokeai/frontend/web/` for review.
|
||||
|
||||
## Stack
|
||||
|
||||
State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:
|
||||
|
||||
- `createAsyncThunk` for HTTP requests
|
||||
- `createEntityAdapter` for fetching images and models
|
||||
- `createListenerMiddleware` for workflows
|
||||
|
||||
The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.
|
||||
|
||||
Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).
|
||||
|
||||
[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) for components and styling.
|
||||
|
||||
[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.
|
||||
|
||||
[Vite](https://vitejs.dev/) for bundling.
|
||||
|
||||
Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.
|
||||
|
||||
## Contributing
|
||||
|
||||
Thanks for your interest in contributing to the InvokeAI Web UI!
|
||||
|
||||
We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.
|
||||
|
||||
### Dev Environment
|
||||
|
||||
**Setup**
|
||||
|
||||
1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:
|
||||
```bash
|
||||
node --version
|
||||
```
|
||||
2. Install [yarn classic](https://classic.yarnpkg.com/lang/en/) and confirm it is installed by running this:
|
||||
```bash
|
||||
npm install --global yarn
|
||||
yarn --version
|
||||
```
|
||||
|
||||
From `invokeai/frontend/web/` run `yarn install` to get everything set up.
|
||||
|
||||
Start everything in dev mode:
|
||||
1. Ensure your virtual environment is running
|
||||
2. Start the dev server: `yarn dev`
|
||||
3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
|
||||
4. Point your browser to the dev server address e.g. [http://localhost:5173/](http://localhost:5173/)
|
||||
|
||||
### VSCode Remote Dev
|
||||
|
||||
We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. We suggest disabling the IDE's port forwarding feature and doing it manually via SSH:
|
||||
|
||||
`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`
|
||||
|
||||
### Production builds
|
||||
|
||||
For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.
|
||||
|
||||
If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.
|
||||
|
||||
To build for production, run `yarn build`.
|
@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh
|
||||
Once you're setup, for more information, you can review the documentation specific to your area of interest:
|
||||
|
||||
* #### [InvokeAI Architecure](../ARCHITECTURE.md)
|
||||
* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
|
||||
* #### [Frontend Documentation](./contributingToFrontend.md)
|
||||
* #### [Node Documentation](../INVOCATIONS.md)
|
||||
* #### [Local Development](../LOCAL_DEVELOPMENT.md)
|
||||
|
||||
|
@ -1,53 +0,0 @@
|
||||
## :octicons-log-16: Important Changes Since Version 2.3
|
||||
|
||||
### Nodes
|
||||
|
||||
Behind the scenes, InvokeAI has been completely rewritten to support
|
||||
"nodes," small unitary operations that can be combined into graphs to
|
||||
form arbitrary workflows. For example, there is a prompt node that
|
||||
processes the prompt string and feeds it to a text2latent node that
|
||||
generates a latent image. The latents are then fed to a latent2image
|
||||
node that translates the latent image into a PNG.
|
||||
|
||||
The WebGUI has a node editor that allows you to graphically design and
|
||||
execute custom node graphs. The ability to save and load graphs is
|
||||
still a work in progress, but coming soon.
|
||||
|
||||
### Command-Line Interface Retired
|
||||
|
||||
All "invokeai" command-line interfaces have been retired as of version
|
||||
3.4.
|
||||
|
||||
To launch the Web GUI from the command-line, use the command
|
||||
`invokeai-web` rather than the traditional `invokeai --web`.
|
||||
|
||||
### ControlNet
|
||||
|
||||
This version of InvokeAI features ControlNet, a system that allows you
|
||||
to achieve exact poses for human and animal figures by providing a
|
||||
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md)
|
||||
|
||||
### New Schedulers
|
||||
|
||||
The list of schedulers has been completely revamped and brought up to date:
|
||||
|
||||
| **Short Name** | **Scheduler** | **Notes** |
|
||||
|----------------|---------------------------------|-----------------------------|
|
||||
| **ddim** | DDIMScheduler | |
|
||||
| **ddpm** | DDPMScheduler | |
|
||||
| **deis** | DEISMultistepScheduler | |
|
||||
| **lms** | LMSDiscreteScheduler | |
|
||||
| **pndm** | PNDMScheduler | |
|
||||
| **heun** | HeunDiscreteScheduler | original noise schedule |
|
||||
| **heun_k** | HeunDiscreteScheduler | using karras noise schedule |
|
||||
| **euler** | EulerDiscreteScheduler | original noise schedule |
|
||||
| **euler_k** | EulerDiscreteScheduler | using karras noise schedule |
|
||||
| **kdpm_2** | KDPM2DiscreteScheduler | |
|
||||
| **kdpm_2_a** | KDPM2AncestralDiscreteScheduler | |
|
||||
| **dpmpp_2s** | DPMSolverSinglestepScheduler | |
|
||||
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
|
||||
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
|
||||
| **unipc** | UniPCMultistepScheduler | CPU only |
|
||||
| **lcm** | LCMScheduler | |
|
||||
|
||||
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
|
@ -229,28 +229,29 @@ clarity on the intent and common use cases we expect for utilizing them.
|
||||
currently being rendered by your browser into a merged copy of the image. This
|
||||
lowers the resource requirements and should improve performance.
|
||||
|
||||
### Compositing / Seam Correction
|
||||
### Seam Correction
|
||||
|
||||
When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
|
||||
by Stable Diffusion into your existing image. This is achieved through compositing - the area around the boundary between your image and the new generation is
|
||||
by Stable Diffusion into your existing image. To do this, the area around the
|
||||
`seam` at the boundary between your image and the new generation is
|
||||
automatically blended to produce a seamless output. In a fully automatic
|
||||
process, a mask is generated to cover the boundary, and then the area of the boundary is
|
||||
process, a mask is generated to cover the seam, and then the area of the seam is
|
||||
Inpainted.
|
||||
|
||||
Although the default options should work well most of the time, sometimes it can
|
||||
help to alter the parameters that control the Compositing. A larger blur and
|
||||
a blur setting have been noted as producing
|
||||
consistently strong results. A strength of 0.7 is best for reducing hard seams.
|
||||
|
||||
- **Mode** - What part of the image will have the the Compositing applied to it.
|
||||
- **Mask edge** will apply Compositing to the edge of the masked area
|
||||
- **Mask** will apply Compositing to the entire masked area
|
||||
- **Unmasked** will apply Compositing to the entire image
|
||||
- **Steps** - Number of generation steps that will occur during the Coherence Pass, similar to Denoising Steps. Higher step counts will generally have better results.
|
||||
- **Strength** - How much noise is added for the Coherence Pass, similar to Denoising Strength. A strength of 0 will result in an unchanged image, while a strength of 1 will result in an image with a completely new area as defined by the Mode setting.
|
||||
- **Blur** - Adjusts the pixel radius of the mask. A larger blur radius will cause the mask to extend past the visibly masked area, while too small of a blur radius will result in a mask that is smaller than the visibly masked area.
|
||||
- **Blur Method** - The method of blur applied to the masked area.
|
||||
help to alter the parameters that control the seam Inpainting. A wider seam and
|
||||
a blur setting of about 1/3 of the seam have been noted as producing
|
||||
consistently strong results (e.g. 96 wide and 16 blur - adds up to 32 blur with
|
||||
both sides). Seam strength of 0.7 is best for reducing hard seams.
|
||||
|
||||
- **Seam Size** - The size of the seam masked area. Set higher to make a larger
|
||||
mask around the seam.
|
||||
- **Seam Blur** - The size of the blur that is applied on _each_ side of the
|
||||
masked area.
|
||||
- **Seam Strength** - The Image To Image Strength parameter used for the
|
||||
Inpainting generation that is applied to the seam area.
|
||||
- **Seam Steps** - The number of generation steps that should be used to Inpaint
|
||||
the seam.
|
||||
|
||||
### Infill & Scaling
|
||||
|
||||
|
@ -18,7 +18,7 @@ title: Home
|
||||
width: 100%;
|
||||
max-width: 100%;
|
||||
height: 50px;
|
||||
background-color: #35A4DB;
|
||||
background-color: #448AFF;
|
||||
color: #fff;
|
||||
font-size: 16px;
|
||||
border: none;
|
||||
@ -43,7 +43,7 @@ title: Home
|
||||
<div align="center" markdown>
|
||||
|
||||
|
||||
[](https://github.com/invoke-ai/InvokeAI)
|
||||
[](https://github.com/invoke-ai/InvokeAI)
|
||||
|
||||
[![discord badge]][discord link]
|
||||
|
||||
@ -145,6 +145,60 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
|
||||
- [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md)
|
||||
- [Database Maintenance and other Command Line Utilities](features/UTILITIES.md)
|
||||
|
||||
## :octicons-log-16: Important Changes Since Version 2.3
|
||||
|
||||
### Nodes
|
||||
|
||||
Behind the scenes, InvokeAI has been completely rewritten to support
|
||||
"nodes," small unitary operations that can be combined into graphs to
|
||||
form arbitrary workflows. For example, there is a prompt node that
|
||||
processes the prompt string and feeds it to a text2latent node that
|
||||
generates a latent image. The latents are then fed to a latent2image
|
||||
node that translates the latent image into a PNG.
|
||||
|
||||
The WebGUI has a node editor that allows you to graphically design and
|
||||
execute custom node graphs. The ability to save and load graphs is
|
||||
still a work in progress, but coming soon.
|
||||
|
||||
### Command-Line Interface Retired
|
||||
|
||||
All "invokeai" command-line interfaces have been retired as of version
|
||||
3.4.
|
||||
|
||||
To launch the Web GUI from the command-line, use the command
|
||||
`invokeai-web` rather than the traditional `invokeai --web`.
|
||||
|
||||
### ControlNet
|
||||
|
||||
This version of InvokeAI features ControlNet, a system that allows you
|
||||
to achieve exact poses for human and animal figures by providing a
|
||||
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md)
|
||||
|
||||
### New Schedulers
|
||||
|
||||
The list of schedulers has been completely revamped and brought up to date:
|
||||
|
||||
| **Short Name** | **Scheduler** | **Notes** |
|
||||
|----------------|---------------------------------|-----------------------------|
|
||||
| **ddim** | DDIMScheduler | |
|
||||
| **ddpm** | DDPMScheduler | |
|
||||
| **deis** | DEISMultistepScheduler | |
|
||||
| **lms** | LMSDiscreteScheduler | |
|
||||
| **pndm** | PNDMScheduler | |
|
||||
| **heun** | HeunDiscreteScheduler | original noise schedule |
|
||||
| **heun_k** | HeunDiscreteScheduler | using karras noise schedule |
|
||||
| **euler** | EulerDiscreteScheduler | original noise schedule |
|
||||
| **euler_k** | EulerDiscreteScheduler | using karras noise schedule |
|
||||
| **kdpm_2** | KDPM2DiscreteScheduler | |
|
||||
| **kdpm_2_a** | KDPM2AncestralDiscreteScheduler | |
|
||||
| **dpmpp_2s** | DPMSolverSinglestepScheduler | |
|
||||
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
|
||||
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
|
||||
| **unipc** | UniPCMultistepScheduler | CPU only |
|
||||
| **lcm** | LCMScheduler | |
|
||||
|
||||
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
|
||||
|
||||
## :material-target: Troubleshooting
|
||||
|
||||
Please check out our **[:material-frequently-asked-questions:
|
||||
|
@ -293,19 +293,6 @@ manager, please follow these steps:
|
||||
|
||||
## Developer Install
|
||||
|
||||
!!! warning
|
||||
|
||||
InvokeAI uses a SQLite database. By running on `main`, you accept responsibility for your database. This
|
||||
means making regular backups (especially before pulling) and/or fixing it yourself in the event that a
|
||||
PR introduces a schema change.
|
||||
|
||||
If you don't need persistent backend storage, you can use an ephemeral in-memory database by setting
|
||||
`use_memory_db: true` under `Path:` in your `invokeai.yaml` file.
|
||||
|
||||
If this is untenable, you should run the application via the official installer or a manual install of the
|
||||
python package from pypi. These releases will not break your database.
|
||||
|
||||
|
||||
If you have an interest in how InvokeAI works, or you would like to
|
||||
add features or bugfixes, you are encouraged to install the source
|
||||
code for InvokeAI. For this to work, you will need to install the
|
||||
@ -401,5 +388,3 @@ environment variable INVOKEAI_ROOT to point to the installation directory.
|
||||
|
||||
Note that if you run into problems with the Conda installation, the InvokeAI
|
||||
staff will **not** be able to help you out. Caveat Emptor!
|
||||
|
||||
[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
|
@ -1,10 +0,0 @@
|
||||
document.addEventListener("DOMContentLoaded", function () {
|
||||
var script = document.createElement("script");
|
||||
script.src = "https://widget.kapa.ai/kapa-widget.bundle.js";
|
||||
script.setAttribute("data-website-id", "b5973bb1-476b-451e-8cf4-98de86745a10");
|
||||
script.setAttribute("data-project-name", "Invoke.AI");
|
||||
script.setAttribute("data-project-color", "#11213C");
|
||||
script.setAttribute("data-project-logo", "https://avatars.githubusercontent.com/u/113954515?s=280&v=4");
|
||||
script.async = true;
|
||||
document.head.appendChild(script);
|
||||
});
|
@ -6,17 +6,10 @@ If you're not familiar with Diffusion, take a look at our [Diffusion Overview.](
|
||||
|
||||
## Features
|
||||
|
||||
### Workflow Library
|
||||
The Workflow Library enables you to save workflows to the Invoke database, allowing you to easily create, modify, and share workflows as needed.
|
||||
|
||||
A curated set of workflows is provided by default - these are designed to help explain important nodes' usage in the Workflow Editor.
|
||||
|
||||

|
||||
|
||||
### Linear View
|
||||
The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations.
|
||||
|
||||
To add an input to the Linear UI, right click on the **input label** and select "Add to Linear View".
|
||||
To add an input to the Linear UI, right click on the input label and select "Add to Linear View".
|
||||
|
||||
The Linear UI View will also be part of the saved workflow, allowing you to share workflows and enabling others to use them, regardless of complexity.
|
||||
|
||||
@ -37,7 +30,7 @@ Any node or input field can be renamed in the workflow editor. If the input fiel
|
||||
Nodes have a "Use Cache" option in their footer. This allows for performance improvements by using the previously cached values during the workflow processing.
|
||||
|
||||
|
||||
## Important Nodes & Concepts
|
||||
## Important Concepts
|
||||
|
||||
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
|
||||
|
||||
@ -63,7 +56,7 @@ The ImageToLatents node takes in a pixel image and a VAE and outputs a latents.
|
||||
|
||||
It is common to want to use both the same seed (for continuity) and random seeds (for variety). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.
|
||||
|
||||

|
||||

|
||||
|
||||
### ControlNet
|
||||
|
||||
|
@ -13,7 +13,6 @@ If you'd prefer, you can also just download the whole node folder from the linke
|
||||
To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
|
||||
|
||||
- Community Nodes
|
||||
+ [Adapters-Linked](#adapters-linked-nodes)
|
||||
+ [Average Images](#average-images)
|
||||
+ [Clean Image Artifacts After Cut](#clean-image-artifacts-after-cut)
|
||||
+ [Close Color Mask](#close-color-mask)
|
||||
@ -25,6 +24,7 @@ To use a community workflow, download the the `.json` node graph file and load i
|
||||
+ [GPT2RandomPromptMaker](#gpt2randompromptmaker)
|
||||
+ [Grid to Gif](#grid-to-gif)
|
||||
+ [Halftone](#halftone)
|
||||
+ [Ideal Size](#ideal-size)
|
||||
+ [Image and Mask Composition Pack](#image-and-mask-composition-pack)
|
||||
+ [Image Dominant Color](#image-dominant-color)
|
||||
+ [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
|
||||
@ -32,11 +32,9 @@ To use a community workflow, download the the `.json` node graph file and load i
|
||||
+ [Image Resize Plus](#image-resize-plus)
|
||||
+ [Load Video Frame](#load-video-frame)
|
||||
+ [Make 3D](#make-3d)
|
||||
+ [Mask Operations](#mask-operations)
|
||||
+ [Mask Operations](#mask-operations)
|
||||
+ [Match Histogram](#match-histogram)
|
||||
+ [Metadata-Linked](#metadata-linked-nodes)
|
||||
+ [Negative Image](#negative-image)
|
||||
+ [Nightmare Promptgen](#nightmare-promptgen)
|
||||
+ [Negative Image](#negative-image)
|
||||
+ [Oobabooga](#oobabooga)
|
||||
+ [Prompt Tools](#prompt-tools)
|
||||
+ [Remote Image](#remote-image)
|
||||
@ -53,19 +51,6 @@ To use a community workflow, download the the `.json` node graph file and load i
|
||||
- [Help](#help)
|
||||
|
||||
|
||||
--------------------------------
|
||||
### Adapters Linked Nodes
|
||||
|
||||
**Description:** A set of nodes for linked adapters (ControlNet, IP-Adapter & T2I-Adapter). This allows multiple adapters to be chained together without using a `collect` node, which means it can be used inside an `iterate` node without the issue of collecting on every iteration.
|
||||
|
||||
- `ControlNet-Linked` - Collects ControlNet info to pass to other nodes.
|
||||
- `IP-Adapter-Linked` - Collects IP-Adapter info to pass to other nodes.
|
||||
- `T2I-Adapter-Linked` - Collects T2I-Adapter info to pass to other nodes.
|
||||
|
||||
Note: These are inherited from the core nodes so any update to the core nodes should be reflected in these.
|
||||
|
||||
**Node Link:** https://github.com/skunkworxdark/adapters-linked-nodes
|
||||
|
||||
--------------------------------
|
||||
### Average Images
|
||||
|
||||
@ -195,6 +180,13 @@ CMYK Halftone Output:
|
||||
|
||||
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />
|
||||
|
||||
--------------------------------
|
||||
### Ideal Size
|
||||
|
||||
**Description:** This node calculates an ideal image size for a first pass of a multi-pass upscaling. The aim is to avoid duplication that results from choosing a size larger than the model is capable of.
|
||||
|
||||
**Node Link:** https://github.com/JPPhoto/ideal-size-node
|
||||
|
||||
--------------------------------
|
||||
### Image and Mask Composition Pack
|
||||
|
||||
@ -315,20 +307,6 @@ See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/mai
|
||||
|
||||
<img src="https://github.com/skunkworxdark/match_histogram/assets/21961335/ed12f329-a0ef-444a-9bae-129ed60d6097" width="300" />
|
||||
|
||||
--------------------------------
|
||||
### Metadata Linked Nodes
|
||||
|
||||
**Description:** A set of nodes for Metadata. Collect Metadata from within an `iterate` node & extract metadata from an image.
|
||||
|
||||
- `Metadata Item Linked` - Allows collecting of metadata while within an iterate node with no need for a collect node or conversion to metadata node.
|
||||
- `Metadata From Image` - Provides Metadata from an image.
|
||||
- `Metadata To String` - Extracts a String value of a label from metadata.
|
||||
- `Metadata To Integer` - Extracts an Integer value of a label from metadata.
|
||||
- `Metadata To Float` - Extracts a Float value of a label from metadata.
|
||||
- `Metadata To Scheduler` - Extracts a Scheduler value of a label from metadata.
|
||||
|
||||
**Node Link:** https://github.com/skunkworxdark/metadata-linked-nodes
|
||||
|
||||
--------------------------------
|
||||
### Negative Image
|
||||
|
||||
@ -339,13 +317,6 @@ Node Link: https://github.com/VeyDlin/negative-image-node
|
||||
View:
|
||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/negative-image-node/master/.readme/node.png" width="500" />
|
||||
|
||||
--------------------------------
|
||||
### Nightmare Promptgen
|
||||
|
||||
**Description:** Nightmare Prompt Generator - Uses a local text generation model to create unique imaginative (but usually nightmarish) prompts for InvokeAI. By default, it allows you to choose from some gpt-neo models I finetuned on over 2500 of my own InvokeAI prompts in Compel format, but you're able to add your own, as well. Offers support for replacing any troublesome words with a random choice from a list you can also define.
|
||||
|
||||
**Node Link:** [https://github.com/gogurtenjoyer/nightmare-promptgen](https://github.com/gogurtenjoyer/nightmare-promptgen)
|
||||
|
||||
--------------------------------
|
||||
### Oobabooga
|
||||
|
||||
|
@ -36,7 +36,6 @@ their descriptions.
|
||||
| Integer Math | Perform basic math operations on two integers |
|
||||
| Convert Image Mode | Converts an image to a different mode. |
|
||||
| Crop Image | Crops an image to a specified box. The box can be outside of the image. |
|
||||
| Ideal Size | Calculates an ideal image size for latents for a first pass of a multi-pass upscaling to avoid duplication and other artifacts |
|
||||
| Image Hue Adjustment | Adjusts the Hue of an image. |
|
||||
| Inverse Lerp Image | Inverse linear interpolation of all pixels of an image |
|
||||
| Image Primitive | An image primitive value |
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Example Workflows
|
||||
|
||||
We've curated some example workflows for you to get started with Workflows in InvokeAI! These can also be found in the Workflow Library, located in the Workflow Editor of Invoke.
|
||||
We've curated some example workflows for you to get started with Workflows in InvokeAI
|
||||
|
||||
To use them, right click on your desired workflow, follow the link to GitHub and click the "⬇" button to download the raw file. You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!
|
||||
|
||||
|
@ -215,7 +215,6 @@ We thank them for all of their time and hard work.
|
||||
- Robert Bolender
|
||||
- Robin Rombach
|
||||
- Rohan Barar
|
||||
- rohinish404
|
||||
- rpagliuca
|
||||
- rromb
|
||||
- Rupesh Sreeraman
|
||||
|
@ -1,5 +0,0 @@
|
||||
:root {
|
||||
--md-primary-fg-color: #35A4DB;
|
||||
--md-primary-fg-color--light: #35A4DB;
|
||||
--md-primary-fg-color--dark: #35A4DB;
|
||||
}
|
@ -1,8 +1,8 @@
|
||||
{
|
||||
"name": "Text to Image - SD1.5",
|
||||
"name": "Text to Image",
|
||||
"author": "InvokeAI",
|
||||
"description": "Sample text to image workflow for Stable Diffusion 1.5/2",
|
||||
"version": "1.1.0",
|
||||
"version": "1.0.1",
|
||||
"contact": "invoke@invoke.ai",
|
||||
"tags": "text2image, SD1.5, SD2, default",
|
||||
"notes": "",
|
||||
@ -18,19 +18,10 @@
|
||||
{
|
||||
"nodeId": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||
"fieldName": "prompt"
|
||||
},
|
||||
{
|
||||
"nodeId": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"fieldName": "width"
|
||||
},
|
||||
{
|
||||
"nodeId": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"fieldName": "height"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"category": "default",
|
||||
"version": "2.0.0"
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"nodes": [
|
||||
{
|
||||
@ -39,56 +30,44 @@
|
||||
"data": {
|
||||
"id": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||
"type": "compel",
|
||||
"label": "Negative Compel Prompt",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"prompt": {
|
||||
"id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
|
||||
"name": "prompt",
|
||||
"type": "string",
|
||||
"fieldKind": "input",
|
||||
"label": "Negative Prompt",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": ""
|
||||
},
|
||||
"clip": {
|
||||
"id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
|
||||
"name": "clip",
|
||||
"type": "ClipField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
"label": ""
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"conditioning": {
|
||||
"id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
|
||||
"name": "conditioning",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
"type": "ConditioningField",
|
||||
"fieldKind": "output"
|
||||
}
|
||||
}
|
||||
},
|
||||
"label": "Negative Compel Prompt",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"embedWorkflow": false,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"width": 320,
|
||||
"height": 259,
|
||||
"height": 261,
|
||||
"position": {
|
||||
"x": 1000,
|
||||
"y": 350
|
||||
"x": 995.7263915923627,
|
||||
"y": 239.67783573351227
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -97,60 +76,37 @@
|
||||
"data": {
|
||||
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"type": "noise",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.1",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"seed": {
|
||||
"id": "6431737c-918a-425d-a3b4-5d57e2f35d4d",
|
||||
"name": "seed",
|
||||
"type": "integer",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"width": {
|
||||
"id": "38fc5b66-fe6e-47c8-bba9-daf58e454ed7",
|
||||
"name": "width",
|
||||
"type": "integer",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 512
|
||||
},
|
||||
"height": {
|
||||
"id": "16298330-e2bf-4872-a514-d6923df53cbb",
|
||||
"name": "height",
|
||||
"type": "integer",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 512
|
||||
},
|
||||
"use_cpu": {
|
||||
"id": "c7c436d3-7a7a-4e76-91e4-c6deb271623c",
|
||||
"name": "use_cpu",
|
||||
"type": "boolean",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": true
|
||||
}
|
||||
},
|
||||
@ -158,40 +114,35 @@
|
||||
"noise": {
|
||||
"id": "50f650dc-0184-4e23-a927-0497a96fe954",
|
||||
"name": "noise",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
"type": "LatentsField",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"width": {
|
||||
"id": "bb8a452b-133d-42d1-ae4a-3843d7e4109a",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
"type": "integer",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"height": {
|
||||
"id": "35cfaa12-3b8b-4b7a-a884-327ff3abddd9",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
"type": "integer",
|
||||
"fieldKind": "output"
|
||||
}
|
||||
}
|
||||
},
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"embedWorkflow": false,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"width": 320,
|
||||
"height": 388,
|
||||
"height": 389,
|
||||
"position": {
|
||||
"x": 600,
|
||||
"y": 325
|
||||
"x": 993.4442117555518,
|
||||
"y": 605.6757415334787
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -200,24 +151,13 @@
|
||||
"data": {
|
||||
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"type": "main_model_loader",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"model": {
|
||||
"id": "993eabd2-40fd-44fe-bce7-5d0c7075ddab",
|
||||
"name": "model",
|
||||
"type": "MainModelField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "MainModelField"
|
||||
},
|
||||
"value": {
|
||||
"model_name": "stable-diffusion-v1-5",
|
||||
"base_model": "sd-1",
|
||||
@ -229,40 +169,35 @@
|
||||
"unet": {
|
||||
"id": "5c18c9db-328d-46d0-8cb9-143391c410be",
|
||||
"name": "unet",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
"type": "UNetField",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"clip": {
|
||||
"id": "6effcac0-ec2f-4bf5-a49e-a2c29cf921f4",
|
||||
"name": "clip",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
"type": "ClipField",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"vae": {
|
||||
"id": "57683ba3-f5f5-4f58-b9a2-4b83dacad4a1",
|
||||
"name": "vae",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "VaeField"
|
||||
}
|
||||
"type": "VaeField",
|
||||
"fieldKind": "output"
|
||||
}
|
||||
}
|
||||
},
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"embedWorkflow": false,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"width": 320,
|
||||
"height": 226,
|
||||
"position": {
|
||||
"x": 600,
|
||||
"y": 25
|
||||
"x": 163.04436745878343,
|
||||
"y": 254.63156870373479
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -271,56 +206,44 @@
|
||||
"data": {
|
||||
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||
"type": "compel",
|
||||
"label": "Positive Compel Prompt",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"prompt": {
|
||||
"id": "7739aff6-26cb-4016-8897-5a1fb2305e4e",
|
||||
"name": "prompt",
|
||||
"type": "string",
|
||||
"fieldKind": "input",
|
||||
"label": "Positive Prompt",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": "Super cute tiger cub, national geographic award-winning photograph"
|
||||
"value": ""
|
||||
},
|
||||
"clip": {
|
||||
"id": "48d23dce-a6ae-472a-9f8c-22a714ea5ce0",
|
||||
"name": "clip",
|
||||
"type": "ClipField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
"label": ""
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"conditioning": {
|
||||
"id": "37cf3a9d-f6b7-4b64-8ff6-2558c5ecc447",
|
||||
"name": "conditioning",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
"type": "ConditioningField",
|
||||
"fieldKind": "output"
|
||||
}
|
||||
}
|
||||
},
|
||||
"label": "Positive Compel Prompt",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"embedWorkflow": false,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"width": 320,
|
||||
"height": 259,
|
||||
"height": 261,
|
||||
"position": {
|
||||
"x": 1000,
|
||||
"y": 25
|
||||
"x": 595.7263915923627,
|
||||
"y": 239.67783573351227
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -329,36 +252,21 @@
|
||||
"data": {
|
||||
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||
"type": "rand_int",
|
||||
"label": "Random Seed",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": false,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"low": {
|
||||
"id": "3ec65a37-60ba-4b6c-a0b2-553dd7a84b84",
|
||||
"name": "low",
|
||||
"type": "integer",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"high": {
|
||||
"id": "085f853a-1a5f-494d-8bec-e4ba29a3f2d1",
|
||||
"name": "high",
|
||||
"type": "integer",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 2147483647
|
||||
}
|
||||
},
|
||||
@ -366,20 +274,23 @@
|
||||
"value": {
|
||||
"id": "812ade4d-7699-4261-b9fc-a6c9d2ab55ee",
|
||||
"name": "value",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
"type": "integer",
|
||||
"fieldKind": "output"
|
||||
}
|
||||
}
|
||||
},
|
||||
"label": "Random Seed",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"embedWorkflow": false,
|
||||
"isIntermediate": true,
|
||||
"useCache": false,
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32,
|
||||
"height": 218,
|
||||
"position": {
|
||||
"x": 600,
|
||||
"y": 275
|
||||
"x": 541.094822888628,
|
||||
"y": 694.5704476446829
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -388,224 +299,144 @@
|
||||
"data": {
|
||||
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "denoise_latents",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.5.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"positive_conditioning": {
|
||||
"id": "90b7f4f8-ada7-4028-8100-d2e54f192052",
|
||||
"name": "positive_conditioning",
|
||||
"type": "ConditioningField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"negative_conditioning": {
|
||||
"id": "9393779e-796c-4f64-b740-902a1177bf53",
|
||||
"name": "negative_conditioning",
|
||||
"type": "ConditioningField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"noise": {
|
||||
"id": "8e17f1e5-4f98-40b1-b7f4-86aeeb4554c1",
|
||||
"name": "noise",
|
||||
"type": "LatentsField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"steps": {
|
||||
"id": "9b63302d-6bd2-42c9-ac13-9b1afb51af88",
|
||||
"name": "steps",
|
||||
"type": "integer",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 50
|
||||
"value": 10
|
||||
},
|
||||
"cfg_scale": {
|
||||
"id": "87dd04d3-870e-49e1-98bf-af003a810109",
|
||||
"name": "cfg_scale",
|
||||
"type": "FloatPolymorphic",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 7.5
|
||||
},
|
||||
"denoising_start": {
|
||||
"id": "f369d80f-4931-4740-9bcd-9f0620719fab",
|
||||
"name": "denoising_start",
|
||||
"type": "float",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"denoising_end": {
|
||||
"id": "747d10e5-6f02-445c-994c-0604d814de8c",
|
||||
"name": "denoising_end",
|
||||
"type": "float",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 1
|
||||
},
|
||||
"scheduler": {
|
||||
"id": "1de84a4e-3a24-4ec8-862b-16ce49633b9b",
|
||||
"name": "scheduler",
|
||||
"type": "Scheduler",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "SchedulerField"
|
||||
},
|
||||
"value": "unipc"
|
||||
"value": "euler"
|
||||
},
|
||||
"unet": {
|
||||
"id": "ffa6fef4-3ce2-4bdb-9296-9a834849489b",
|
||||
"name": "unet",
|
||||
"type": "UNetField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"control": {
|
||||
"id": "077b64cb-34be-4fcc-83f2-e399807a02bd",
|
||||
"name": "control",
|
||||
"type": "ControlPolymorphic",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "ControlField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"ip_adapter": {
|
||||
"id": "1d6948f7-3a65-4a65-a20c-768b287251aa",
|
||||
"name": "ip_adapter",
|
||||
"type": "IPAdapterPolymorphic",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "IPAdapterField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"t2i_adapter": {
|
||||
"id": "75e67b09-952f-4083-aaf4-6b804d690412",
|
||||
"name": "t2i_adapter",
|
||||
"type": "T2IAdapterPolymorphic",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "T2IAdapterField"
|
||||
}
|
||||
},
|
||||
"cfg_rescale_multiplier": {
|
||||
"id": "9101f0a6-5fe0-4826-b7b3-47e5d506826c",
|
||||
"name": "cfg_rescale_multiplier",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 0
|
||||
"label": ""
|
||||
},
|
||||
"latents": {
|
||||
"id": "334d4ba3-5a99-4195-82c5-86fb3f4f7d43",
|
||||
"name": "latents",
|
||||
"type": "LatentsField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"denoise_mask": {
|
||||
"id": "0d3dbdbf-b014-4e95-8b18-ff2ff9cb0bfa",
|
||||
"name": "denoise_mask",
|
||||
"type": "DenoiseMaskField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "DenoiseMaskField"
|
||||
}
|
||||
"label": ""
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"latents": {
|
||||
"id": "70fa5bbc-0c38-41bb-861a-74d6d78d2f38",
|
||||
"name": "latents",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
"type": "LatentsField",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"width": {
|
||||
"id": "98ee0e6c-82aa-4e8f-8be5-dc5f00ee47f0",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
"type": "integer",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"height": {
|
||||
"id": "e8cb184a-5e1a-47c8-9695-4b8979564f5d",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
"type": "integer",
|
||||
"fieldKind": "output"
|
||||
}
|
||||
}
|
||||
},
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"embedWorkflow": false,
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.4.0"
|
||||
},
|
||||
"width": 320,
|
||||
"height": 703,
|
||||
"height": 646,
|
||||
"position": {
|
||||
"x": 1400,
|
||||
"y": 25
|
||||
"x": 1476.5794704734735,
|
||||
"y": 256.80174342731783
|
||||
}
|
||||
},
|
||||
{
|
||||
@ -614,185 +445,153 @@
|
||||
"data": {
|
||||
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||
"type": "l2i",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": false,
|
||||
"useCache": true,
|
||||
"version": "1.2.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"metadata": {
|
||||
"id": "ab375f12-0042-4410-9182-29e30db82c85",
|
||||
"name": "metadata",
|
||||
"type": "MetadataField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "MetadataField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"latents": {
|
||||
"id": "3a7e7efd-bff5-47d7-9d48-615127afee78",
|
||||
"name": "latents",
|
||||
"type": "LatentsField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"vae": {
|
||||
"id": "a1f5f7a1-0795-4d58-b036-7820c0b0ef2b",
|
||||
"name": "vae",
|
||||
"type": "VaeField",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "VaeField"
|
||||
}
|
||||
"label": ""
|
||||
},
|
||||
"tiled": {
|
||||
"id": "da52059a-0cee-4668-942f-519aa794d739",
|
||||
"name": "tiled",
|
||||
"type": "boolean",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": false
|
||||
},
|
||||
"fp32": {
|
||||
"id": "c4841df3-b24e-4140-be3b-ccd454c2522c",
|
||||
"name": "fp32",
|
||||
"type": "boolean",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": true
|
||||
"value": false
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"id": "72d667d0-cf85-459d-abf2-28bd8b823fe7",
|
||||
"name": "image",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ImageField"
|
||||
}
|
||||
"type": "ImageField",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"width": {
|
||||
"id": "c8c907d8-1066-49d1-b9a6-83bdcd53addc",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
"type": "integer",
|
||||
"fieldKind": "output"
|
||||
},
|
||||
"height": {
|
||||
"id": "230f359c-b4ea-436c-b372-332d7dcdca85",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
"type": "integer",
|
||||
"fieldKind": "output"
|
||||
}
|
||||
}
|
||||
},
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"embedWorkflow": false,
|
||||
"isIntermediate": false,
|
||||
"useCache": true,
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"width": 320,
|
||||
"height": 266,
|
||||
"height": 267,
|
||||
"position": {
|
||||
"x": 1800,
|
||||
"y": 25
|
||||
"x": 2037.9648469717395,
|
||||
"y": 426.10844427600136
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
||||
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "seed"
|
||||
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"targetHandle": "seed",
|
||||
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-7d8bf987-284f-413a-b2fd-d825445a5d6cclip",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"sourceHandle": "clip",
|
||||
"target": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
"targetHandle": "clip",
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-7d8bf987-284f-413a-b2fd-d825445a5d6cclip",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-93dc02a4-d05b-48ed-b99c-c9b616af3402clip",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"sourceHandle": "clip",
|
||||
"target": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
"targetHandle": "clip",
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-93dc02a4-d05b-48ed-b99c-c9b616af3402clip",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise",
|
||||
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "noise",
|
||||
"targetHandle": "noise"
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"targetHandle": "noise",
|
||||
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning",
|
||||
"source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "positive_conditioning"
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"targetHandle": "positive_conditioning",
|
||||
"id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning",
|
||||
"source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "unet",
|
||||
"targetHandle": "unet"
|
||||
"targetHandle": "negative_conditioning",
|
||||
"id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents",
|
||||
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||
"type": "default",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"sourceHandle": "unet",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"targetHandle": "unet",
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"sourceHandle": "latents",
|
||||
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||
"type": "default",
|
||||
"targetHandle": "latents",
|
||||
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents",
|
||||
"type": "default"
|
||||
},
|
||||
{
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||
"targetHandle": "vae",
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae",
|
||||
"type": "default"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -91,11 +91,9 @@ rm -rf InvokeAI-Installer

# copy content
mkdir InvokeAI-Installer
for f in templates *.txt *.reg; do
for f in templates lib *.txt *.reg; do
    cp -r ${f} InvokeAI-Installer/
done
mkdir InvokeAI-Installer/lib
cp lib/*.py InvokeAI-Installer/lib

# Move the wheel
mv dist/*.whl InvokeAI-Installer/lib/
@ -113,6 +111,6 @@ cp WinLongPathsEnabled.reg InvokeAI-Installer/
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer

# clean up
rm -rf InvokeAI-Installer tmp dist ../invokeai/frontend/web/dist/
rm -rf InvokeAI-Installer tmp dist

exit 0
@ -241,12 +241,12 @@ class InvokeAiInstance:
        pip[
            "install",
            "--require-virtualenv",
            "numpy==1.26.3",  # choose versions that won't be uninstalled during phase 2
            "numpy~=1.24.0",  # choose versions that won't be uninstalled during phase 2
            "urllib3~=1.26.0",
            "requests~=2.28.0",
            "torch==2.1.2",
            "torch==2.1.0",
            "torchmetrics==0.11.4",
            "torchvision==0.16.2",
            "torchvision>=0.14.1",
            "--force-reinstall",
            "--find-links" if find_links is not None else None,
            find_links,
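The hunk above pins helper packages during phase 1 of the installer so that phase 2 does not uninstall them. As a rough standalone sketch of that step, here is the equivalent pip invocation via subprocess, using the pins from one side of this diff; the function name and the find_links handling are illustrative.

```python
import subprocess
import sys
from typing import Optional

def pin_phase1_packages(find_links: Optional[str] = None) -> None:
    """Install the helper packages with pinned versions inside the active virtualenv."""
    args = [
        sys.executable, "-m", "pip", "install", "--require-virtualenv",
        "numpy==1.26.3",
        "urllib3~=1.26.0",
        "requests~=2.28.0",
        "torch==2.1.2",
        "torchmetrics==0.11.4",
        "torchvision==0.16.2",
        "--force-reinstall",
    ]
    if find_links is not None:
        # Prefer wheels from a local directory when one is provided, as the installer does.
        args += ["--find-links", find_links]
    subprocess.run(args, check=True)
```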
@ -3,7 +3,6 @@
|
||||
from logging import Logger
|
||||
|
||||
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
|
||||
from invokeai.backend.model_manager.metadata import ModelMetadataStore
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
from invokeai.version.invokeai_version import __version__
|
||||
|
||||
@ -12,7 +11,6 @@ from ..services.board_images.board_images_default import BoardImagesService
|
||||
from ..services.board_records.board_records_sqlite import SqliteBoardRecordStorage
|
||||
from ..services.boards.boards_default import BoardService
|
||||
from ..services.config import InvokeAIAppConfig
|
||||
from ..services.download import DownloadQueueService
|
||||
from ..services.image_files.image_files_disk import DiskImageFileStorage
|
||||
from ..services.image_records.image_records_sqlite import SqliteImageRecordStorage
|
||||
from ..services.images.images_default import ImageService
|
||||
@ -31,7 +29,8 @@ from ..services.model_records import ModelRecordServiceSQL
|
||||
from ..services.names.names_default import SimpleNameService
|
||||
from ..services.session_processor.session_processor_default import DefaultSessionProcessor
|
||||
from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
|
||||
from ..services.shared.graph import GraphExecutionState
|
||||
from ..services.shared.default_graphs import create_system_graphs
|
||||
from ..services.shared.graph import GraphExecutionState, LibraryGraph
|
||||
from ..services.urls.urls_default import LocalUrlService
|
||||
from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
|
||||
from .events import FastAPIEventService
|
||||
@ -62,7 +61,7 @@ class ApiDependencies:
|
||||
invoker: Invoker
|
||||
|
||||
@staticmethod
|
||||
def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger) -> None:
|
||||
def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger):
|
||||
logger.info(f"InvokeAI version {__version__}")
|
||||
logger.info(f"Root directory = {str(config.root_path)}")
|
||||
logger.debug(f"Internet connectivity is {config.internet_available}")
|
||||
@ -81,20 +80,15 @@ class ApiDependencies:
|
||||
boards = BoardService()
|
||||
events = FastAPIEventService(event_handler_id)
|
||||
graph_execution_manager = SqliteItemStorage[GraphExecutionState](db=db, table_name="graph_executions")
|
||||
graph_library = SqliteItemStorage[LibraryGraph](db=db, table_name="graphs")
|
||||
image_records = SqliteImageRecordStorage(db=db)
|
||||
images = ImageService()
|
||||
invocation_cache = MemoryInvocationCache(max_cache_size=config.node_cache_size)
|
||||
latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f"{output_folder}/latents"))
|
||||
model_manager = ModelManagerService(config, logger)
|
||||
model_record_service = ModelRecordServiceSQL(db=db)
|
||||
download_queue_service = DownloadQueueService(event_bus=events)
|
||||
metadata_store = ModelMetadataStore(db=db)
|
||||
model_install_service = ModelInstallService(
|
||||
app_config=config,
|
||||
record_store=model_record_service,
|
||||
download_queue=download_queue_service,
|
||||
metadata_store=metadata_store,
|
||||
event_bus=events,
|
||||
app_config=config, record_store=model_record_service, event_bus=events
|
||||
)
|
||||
names = SimpleNameService()
|
||||
performance_statistics = InvocationStatsService()
|
||||
@ -113,6 +107,7 @@ class ApiDependencies:
|
||||
configuration=configuration,
|
||||
events=events,
|
||||
graph_execution_manager=graph_execution_manager,
|
||||
graph_library=graph_library,
|
||||
image_files=image_files,
|
||||
image_records=image_records,
|
||||
images=images,
|
||||
@ -121,7 +116,6 @@ class ApiDependencies:
|
||||
logger=logger,
|
||||
model_manager=model_manager,
|
||||
model_records=model_record_service,
|
||||
download_queue=download_queue_service,
|
||||
model_install=model_install_service,
|
||||
names=names,
|
||||
performance_statistics=performance_statistics,
|
||||
@ -133,10 +127,12 @@ class ApiDependencies:
|
||||
workflow_records=workflow_records,
|
||||
)
|
||||
|
||||
create_system_graphs(services.graph_library)
|
||||
|
||||
ApiDependencies.invoker = Invoker(services)
|
||||
db.clean()
|
||||
|
||||
@staticmethod
|
||||
def shutdown() -> None:
|
||||
def shutdown():
|
||||
if ApiDependencies.invoker:
|
||||
ApiDependencies.invoker.stop()
|
||||
|
@ -1,28 +0,0 @@
from typing import Any

from starlette.responses import Response
from starlette.staticfiles import StaticFiles


class NoCacheStaticFiles(StaticFiles):
    """
    This class is used to override the default caching behavior of starlette for static files,
    ensuring we *never* cache static files. It modifies the file response headers to strictly
    never cache the files.

    Static files include the javascript bundles, fonts, locales, and some images. Generated
    images are not included, as they are served by a router.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        self.cachecontrol = "max-age=0, no-cache, no-store, , must-revalidate"
        self.pragma = "no-cache"
        self.expires = "0"
        super().__init__(*args, **kwargs)

    def file_response(self, *args: Any, **kwargs: Any) -> Response:
        resp = super().file_response(*args, **kwargs)
        resp.headers.setdefault("Cache-Control", self.cachecontrol)
        resp.headers.setdefault("Pragma", self.pragma)
        resp.headers.setdefault("Expires", self.expires)
        return resp
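For context on the deleted NoCacheStaticFiles helper above, here is a minimal usage sketch, assuming the class as quoted in the diff is available in scope; the directory name and the header check are illustrative only, not part of the change.

```python
from pathlib import Path

from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
# Mount a local "static/" directory with the never-cache behaviour described above.
app.mount("/static", NoCacheStaticFiles(directory=Path("static")), name="static")

client = TestClient(app)
resp = client.get("/static/index.html")
# Every static response should now carry the headers set in __init__.
print(resp.headers.get("Cache-Control"), resp.headers.get("Pragma"), resp.headers.get("Expires"))
```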
@ -1,111 +0,0 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein
|
||||
"""FastAPI route for the download queue."""
|
||||
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import Body, Path, Response
|
||||
from fastapi.routing import APIRouter
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
from starlette.exceptions import HTTPException
|
||||
|
||||
from invokeai.app.services.download import (
|
||||
DownloadJob,
|
||||
UnknownJobIDException,
|
||||
)
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
download_queue_router = APIRouter(prefix="/v1/download_queue", tags=["download_queue"])
|
||||
|
||||
|
||||
@download_queue_router.get(
|
||||
"/",
|
||||
operation_id="list_downloads",
|
||||
)
|
||||
async def list_downloads() -> List[DownloadJob]:
|
||||
"""Get a list of active and inactive jobs."""
|
||||
queue = ApiDependencies.invoker.services.download_queue
|
||||
return queue.list_jobs()
|
||||
|
||||
|
||||
@download_queue_router.patch(
|
||||
"/",
|
||||
operation_id="prune_downloads",
|
||||
responses={
|
||||
204: {"description": "All completed jobs have been pruned"},
|
||||
400: {"description": "Bad request"},
|
||||
},
|
||||
)
|
||||
async def prune_downloads():
|
||||
"""Prune completed and errored jobs."""
|
||||
queue = ApiDependencies.invoker.services.download_queue
|
||||
queue.prune_jobs()
|
||||
return Response(status_code=204)
|
||||
|
||||
|
||||
@download_queue_router.post(
|
||||
"/i/",
|
||||
operation_id="download",
|
||||
)
|
||||
async def download(
|
||||
source: AnyHttpUrl = Body(description="download source"),
|
||||
dest: str = Body(description="download destination"),
|
||||
priority: int = Body(default=10, description="queue priority"),
|
||||
access_token: Optional[str] = Body(default=None, description="token for authorization to download"),
|
||||
) -> DownloadJob:
|
||||
"""Download the source URL to the file or directory indicted in dest."""
|
||||
queue = ApiDependencies.invoker.services.download_queue
|
||||
return queue.download(source, dest, priority, access_token)
|
||||
|
||||
|
||||
@download_queue_router.get(
|
||||
"/i/{id}",
|
||||
operation_id="get_download_job",
|
||||
responses={
|
||||
200: {"description": "Success"},
|
||||
404: {"description": "The requested download JobID could not be found"},
|
||||
},
|
||||
)
|
||||
async def get_download_job(
|
||||
id: int = Path(description="ID of the download job to fetch."),
|
||||
) -> DownloadJob:
|
||||
"""Get a download job using its ID."""
|
||||
try:
|
||||
job = ApiDependencies.invoker.services.download_queue.id_to_job(id)
|
||||
return job
|
||||
except UnknownJobIDException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
|
||||
|
||||
@download_queue_router.delete(
|
||||
"/i/{id}",
|
||||
operation_id="cancel_download_job",
|
||||
responses={
|
||||
204: {"description": "Job has been cancelled"},
|
||||
404: {"description": "The requested download JobID could not be found"},
|
||||
},
|
||||
)
|
||||
async def cancel_download_job(
|
||||
id: int = Path(description="ID of the download job to cancel."),
|
||||
):
|
||||
"""Cancel a download job using its ID."""
|
||||
try:
|
||||
queue = ApiDependencies.invoker.services.download_queue
|
||||
job = queue.id_to_job(id)
|
||||
queue.cancel_job(job)
|
||||
return Response(status_code=204)
|
||||
except UnknownJobIDException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
|
||||
|
||||
@download_queue_router.delete(
|
||||
"/i",
|
||||
operation_id="cancel_all_download_jobs",
|
||||
responses={
|
||||
204: {"description": "Download jobs have been cancelled"},
|
||||
},
|
||||
)
|
||||
async def cancel_all_download_jobs():
|
||||
"""Cancel all download jobs."""
|
||||
ApiDependencies.invoker.services.download_queue.cancel_all_jobs()
|
||||
return Response(status_code=204)
|
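The deleted download-queue router above exposes enqueue, list, poll, cancel, and prune routes under /api/v1/download_queue (the /api prefix comes from how the router is included in api_app.py). A hedged client sketch follows; the server address, the example URL, and the job field names ("id", "status") are assumptions, while the route paths come from the code above.

```python
import time

import requests

BASE = "http://127.0.0.1:9090/api/v1/download_queue"  # assumed local server address

# Enqueue a download (POST /i/ with source, dest and an optional priority).
job = requests.post(
    f"{BASE}/i/",
    json={"source": "https://example.com/model.safetensors", "dest": "models/", "priority": 10},
).json()

# Poll the job by id until it leaves the active states (GET /i/{id}).
while True:
    status = requests.get(f"{BASE}/i/{job['id']}").json().get("status", "")
    if status not in ("waiting", "running"):  # status names are an assumption
        break
    time.sleep(1)

# Drop completed and errored jobs from the queue listing (PATCH /).
requests.patch(f"{BASE}/")
```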
@ -4,7 +4,7 @@
|
||||
|
||||
from hashlib import sha1
|
||||
from random import randbytes
|
||||
from typing import Any, Dict, List, Optional, Set
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from fastapi import Body, Path, Query, Response
|
||||
from fastapi.routing import APIRouter
|
||||
@ -16,41 +16,27 @@ from invokeai.app.services.model_install import ModelInstallJob, ModelSource
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
InvalidModelException,
|
||||
ModelRecordOrderBy,
|
||||
ModelSummary,
|
||||
UnknownModelException,
|
||||
)
|
||||
from invokeai.app.services.shared.pagination import PaginatedResults
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
ModelFormat,
|
||||
ModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
model_records_router = APIRouter(prefix="/v1/model/record", tags=["model_manager_v2_unstable"])
|
||||
model_records_router = APIRouter(prefix="/v1/model/record", tags=["model_manager_v2"])
|
||||
|
||||
|
||||
class ModelsList(BaseModel):
|
||||
"""Return list of configs."""
|
||||
|
||||
models: List[AnyModelConfig]
|
||||
models: list[AnyModelConfig]
|
||||
|
||||
model_config = ConfigDict(use_enum_values=True)
|
||||
|
||||
|
||||
class ModelTagSet(BaseModel):
|
||||
"""Return tags for a set of models."""
|
||||
|
||||
key: str
|
||||
name: str
|
||||
author: str
|
||||
tags: Set[str]
|
||||
|
||||
|
||||
@model_records_router.get(
|
||||
"/",
|
||||
operation_id="list_model_records",
|
||||
@ -59,9 +45,6 @@ async def list_model_records(
|
||||
base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
|
||||
model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
|
||||
model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"),
|
||||
model_format: Optional[ModelFormat] = Query(
|
||||
default=None, description="Exact match on the format of the model (e.g. 'diffusers')"
|
||||
),
|
||||
) -> ModelsList:
|
||||
"""Get a list of models."""
|
||||
record_store = ApiDependencies.invoker.services.model_records
|
||||
@ -69,14 +52,10 @@ async def list_model_records(
|
||||
if base_models:
|
||||
for base_model in base_models:
|
||||
found_models.extend(
|
||||
record_store.search_by_attr(
|
||||
base_model=base_model, model_type=model_type, model_name=model_name, model_format=model_format
|
||||
)
|
||||
record_store.search_by_attr(base_model=base_model, model_type=model_type, model_name=model_name)
|
||||
)
|
||||
else:
|
||||
found_models.extend(
|
||||
record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
|
||||
)
|
||||
found_models.extend(record_store.search_by_attr(model_type=model_type, model_name=model_name))
|
||||
return ModelsList(models=found_models)
|
||||
|
||||
|
||||
@ -100,59 +79,6 @@ async def get_model_record(
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
|
||||
|
||||
@model_records_router.get("/meta", operation_id="list_model_summary")
|
||||
async def list_model_summary(
|
||||
page: int = Query(default=0, description="The page to get"),
|
||||
per_page: int = Query(default=10, description="The number of models per page"),
|
||||
order_by: ModelRecordOrderBy = Query(default=ModelRecordOrderBy.Default, description="The attribute to order by"),
|
||||
) -> PaginatedResults[ModelSummary]:
|
||||
"""Gets a page of model summary data."""
|
||||
return ApiDependencies.invoker.services.model_records.list_models(page=page, per_page=per_page, order_by=order_by)
|
||||
|
||||
|
||||
@model_records_router.get(
|
||||
"/meta/i/{key}",
|
||||
operation_id="get_model_metadata",
|
||||
responses={
|
||||
200: {"description": "Success"},
|
||||
400: {"description": "Bad request"},
|
||||
404: {"description": "No metadata available"},
|
||||
},
|
||||
)
|
||||
async def get_model_metadata(
|
||||
key: str = Path(description="Key of the model repo metadata to fetch."),
|
||||
) -> Optional[AnyModelRepoMetadata]:
|
||||
"""Get a model metadata object."""
|
||||
record_store = ApiDependencies.invoker.services.model_records
|
||||
result = record_store.get_metadata(key)
|
||||
if not result:
|
||||
raise HTTPException(status_code=404, detail="No metadata for a model with this key")
|
||||
return result
|
||||
|
||||
|
||||
@model_records_router.get(
|
||||
"/tags",
|
||||
operation_id="list_tags",
|
||||
)
|
||||
async def list_tags() -> Set[str]:
|
||||
"""Get a unique set of all the model tags."""
|
||||
record_store = ApiDependencies.invoker.services.model_records
|
||||
return record_store.list_tags()
|
||||
|
||||
|
||||
@model_records_router.get(
|
||||
"/tags/search",
|
||||
operation_id="search_by_metadata_tags",
|
||||
)
|
||||
async def search_by_metadata_tags(
|
||||
tags: Set[str] = Query(default=None, description="Tags to search for"),
|
||||
) -> ModelsList:
|
||||
"""Get a list of models."""
|
||||
record_store = ApiDependencies.invoker.services.model_records
|
||||
results = record_store.search_by_metadata_tag(tags)
|
||||
return ModelsList(models=results)
|
||||
|
||||
|
||||
@model_records_router.patch(
|
||||
"/i/{key}",
|
||||
operation_id="update_model_record",
|
||||
@ -226,7 +152,9 @@ async def del_model_record(
|
||||
async def add_model_record(
|
||||
config: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")],
|
||||
) -> AnyModelConfig:
|
||||
"""Add a model using the configuration information appropriate for its type."""
|
||||
"""
|
||||
Add a model using the configuration information appropriate for its type.
|
||||
"""
|
||||
logger = ApiDependencies.invoker.services.logger
|
||||
record_store = ApiDependencies.invoker.services.model_records
|
||||
if config.key == "<NOKEY>":
|
||||
@ -308,7 +236,7 @@ async def import_model(
|
||||
Installation occurs in the background. Either use list_model_install_jobs()
|
||||
to poll for completion, or listen on the event bus for the following events:
|
||||
|
||||
"model_install_running"
|
||||
"model_install_started"
|
||||
"model_install_completed"
|
||||
"model_install_error"
|
||||
|
||||
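The import_model docstring above says installation runs in the background and can be tracked either by polling list_model_install_jobs() or by listening for the model_install_* events. A minimal polling sketch, assuming an `installer` object standing in for ApiDependencies.invoker.services.model_install and assuming the terminal status names; neither is spelled out in this hunk.

```python
import time

TERMINAL_STATES = {"completed", "error", "errored", "cancelled"}  # assumed status names

def wait_for_installs(installer, poll_seconds: float = 2.0) -> None:
    """Block until every queued model-install job has reached a terminal status."""
    while True:
        jobs = installer.list_jobs()
        statuses = [str(getattr(job, "status", "")).lower() for job in jobs]
        if all(any(t in s for t in TERMINAL_STATES) for s in statuses):
            return
        time.sleep(poll_seconds)
```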
@ -344,46 +272,16 @@ async def import_model(
|
||||
operation_id="list_model_install_jobs",
|
||||
)
|
||||
async def list_model_install_jobs() -> List[ModelInstallJob]:
|
||||
"""Return list of model install jobs."""
|
||||
"""
|
||||
Return list of model install jobs.
|
||||
|
||||
If the optional 'source' argument is provided, then the list will be filtered
|
||||
for partial string matches against the install source.
|
||||
"""
|
||||
jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_install.list_jobs()
|
||||
return jobs
|
||||
|
||||
|
||||
@model_records_router.get(
|
||||
"/import/{id}",
|
||||
operation_id="get_model_install_job",
|
||||
responses={
|
||||
200: {"description": "Success"},
|
||||
404: {"description": "No such job"},
|
||||
},
|
||||
)
|
||||
async def get_model_install_job(id: int = Path(description="Model install id")) -> ModelInstallJob:
|
||||
"""Return model install job corresponding to the given source."""
|
||||
try:
|
||||
return ApiDependencies.invoker.services.model_install.get_job_by_id(id)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
|
||||
|
||||
@model_records_router.delete(
|
||||
"/import/{id}",
|
||||
operation_id="cancel_model_install_job",
|
||||
responses={
|
||||
201: {"description": "The job was cancelled successfully"},
|
||||
415: {"description": "No such job"},
|
||||
},
|
||||
status_code=201,
|
||||
)
|
||||
async def cancel_model_install_job(id: int = Path(description="Model install job ID")) -> None:
|
||||
"""Cancel the model install job(s) corresponding to the given job ID."""
|
||||
installer = ApiDependencies.invoker.services.model_install
|
||||
try:
|
||||
job = installer.get_job_by_id(id)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=415, detail=str(e))
|
||||
installer.cancel_job(job)
|
||||
|
||||
|
||||
@model_records_router.patch(
|
||||
"/import",
|
||||
operation_id="prune_model_install_jobs",
|
||||
@ -393,7 +291,9 @@ async def cancel_model_install_job(id: int = Path(description="Model install job
|
||||
},
|
||||
)
|
||||
async def prune_model_install_jobs() -> Response:
|
||||
"""Prune all completed and errored jobs from the install job list."""
|
||||
"""
|
||||
Prune all completed and errored jobs from the install job list.
|
||||
"""
|
||||
ApiDependencies.invoker.services.model_install.prune_jobs()
|
||||
return Response(status_code=204)
|
||||
|
||||
@ -408,9 +308,7 @@ async def prune_model_install_jobs() -> Response:
|
||||
)
|
||||
async def sync_models_to_config() -> Response:
|
||||
"""
|
||||
Traverse the models and autoimport directories.
|
||||
|
||||
Model files without a corresponding
|
||||
Traverse the models and autoimport directories. Model files without a corresponding
|
||||
record in the database are added. Orphan records without a models file are deleted.
|
||||
"""
|
||||
ApiDependencies.invoker.services.model_install.sync_to_config()
|
||||
|
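A hedged query sketch against the list_model_records route above; the query parameter names come from the signature in this diff, while the server address and the enum strings ("sd-1", "main") are assumptions.

```python
import requests

BASE = "http://127.0.0.1:9090/api/v1/model/record"  # assumed local server address

resp = requests.get(f"{BASE}/", params={"base_models": ["sd-1"], "model_type": "main"})
resp.raise_for_status()
for model in resp.json()["models"]:
    print(model.get("key"), model.get("name"))
```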
@ -23,11 +23,10 @@ class DynamicPromptsResponse(BaseModel):
)
async def parse_dynamicprompts(
    prompt: str = Body(description="The prompt to parse with dynamicprompts"),
    max_prompts: int = Body(ge=1, le=10000, default=1000, description="The max number of prompts to generate"),
    max_prompts: int = Body(default=1000, description="The max number of prompts to generate"),
    combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"),
) -> DynamicPromptsResponse:
    """Creates a batch process"""
    max_prompts = min(max_prompts, 10000)
    generator: Union[RandomPromptGenerator, CombinatorialPromptGenerator]
    try:
        error: Optional[str] = None
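This hunk trades the declarative ge/le bounds on max_prompts for an explicit clamp inside the handler. A small sketch of the difference, with illustrative route names: with declarative bounds an out-of-range value is rejected with a 422 before the handler runs, while the clamp silently caps it.

```python
from fastapi import Body, FastAPI

app = FastAPI()

@app.post("/parse")
async def parse(max_prompts: int = Body(default=1000, ge=1, le=10000)) -> int:
    # Declarative bounds: FastAPI/pydantic reject out-of-range values with a 422.
    return max_prompts

@app.post("/parse-clamped")
async def parse_clamped(max_prompts: int = Body(default=1000)) -> int:
    # Manual clamp: any value is accepted and capped here, as in the handler above.
    return min(max_prompts, 10000)
```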
@ -3,7 +3,6 @@
|
||||
# values from the command line or config file.
|
||||
import sys
|
||||
|
||||
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
|
||||
from invokeai.version.invokeai_version import __version__
|
||||
|
||||
from .services.config import InvokeAIAppConfig
|
||||
@ -28,7 +27,8 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
|
||||
from fastapi.middleware.gzip import GZipMiddleware
|
||||
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
|
||||
from fastapi.openapi.utils import get_openapi
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastapi.responses import FileResponse, HTMLResponse
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
||||
from pydantic.json_schema import models_json_schema
|
||||
@ -45,7 +45,6 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
|
||||
app_info,
|
||||
board_images,
|
||||
boards,
|
||||
download_queue,
|
||||
images,
|
||||
model_records,
|
||||
models,
|
||||
@ -76,7 +75,7 @@ mimetypes.add_type("text/css", ".css")
|
||||
|
||||
# Create the app
|
||||
# TODO: create this all in a method so configuration/etc. can be passed in?
|
||||
app = FastAPI(title="Invoke - Community Edition", docs_url=None, redoc_url=None, separate_input_output_schemas=False)
|
||||
app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None, separate_input_output_schemas=False)
|
||||
|
||||
# Add event handler
|
||||
event_handler_id: int = id(app)
|
||||
@ -117,7 +116,6 @@ app.include_router(sessions.session_router, prefix="/api")
|
||||
app.include_router(utilities.utilities_router, prefix="/api")
|
||||
app.include_router(models.models_router, prefix="/api")
|
||||
app.include_router(model_records.model_records_router, prefix="/api")
|
||||
app.include_router(download_queue.download_queue_router, prefix="/api")
|
||||
app.include_router(images.images_router, prefix="/api")
|
||||
app.include_router(boards.boards_router, prefix="/api")
|
||||
app.include_router(board_images.board_images_router, prefix="/api")
|
||||
@ -205,8 +203,8 @@ app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid a
|
||||
def overridden_swagger() -> HTMLResponse:
|
||||
return get_swagger_ui_html(
|
||||
openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string
|
||||
title=f"{app.title} - Swagger UI",
|
||||
swagger_favicon_url="static/docs/invoke-favicon-docs.svg",
|
||||
title=app.title,
|
||||
swagger_favicon_url="/static/docs/favicon.ico",
|
||||
)
|
||||
|
||||
|
||||
@ -214,20 +212,26 @@ def overridden_swagger() -> HTMLResponse:
|
||||
def overridden_redoc() -> HTMLResponse:
|
||||
return get_redoc_html(
|
||||
openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string
|
||||
title=f"{app.title} - Redoc",
|
||||
redoc_favicon_url="static/docs/invoke-favicon-docs.svg",
|
||||
title=app.title,
|
||||
redoc_favicon_url="/static/docs/favicon.ico",
|
||||
)
|
||||
|
||||
|
||||
web_root_path = Path(list(web_dir.__path__)[0])
|
||||
|
||||
try:
|
||||
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
|
||||
except RuntimeError:
|
||||
logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||
app.mount(
|
||||
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
|
||||
) # docs favicon is in here
|
||||
# Only serve the UI if it has a build
|
||||
if (web_root_path / "dist").exists():
|
||||
# Cannot add headers to StaticFiles, so we must serve index.html with a custom route
|
||||
# Add cache-control: no-store header to prevent caching of index.html, which leads to broken UIs at release
|
||||
@app.get("/", include_in_schema=False, name="ui_root")
|
||||
def get_index() -> FileResponse:
|
||||
return FileResponse(Path(web_root_path, "dist/index.html"), headers={"Cache-Control": "no-store"})
|
||||
|
||||
# # Must mount *after* the other routes else it borks em
|
||||
app.mount("/assets", StaticFiles(directory=Path(web_root_path, "dist/assets/")), name="assets")
|
||||
app.mount("/locales", StaticFiles(directory=Path(web_root_path, "dist/locales/")), name="locales")
|
||||
|
||||
app.mount("/static", StaticFiles(directory=Path(web_root_path, "static/")), name="static") # docs favicon is in here
|
||||
|
||||
|
||||
def invoke_api() -> None:
|
||||
|
@ -1,3 +1,4 @@
import re
from dataclasses import dataclass
from typing import List, Optional, Union

@ -16,7 +17,6 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
from ...backend.model_management.lora import ModelPatcher
from ...backend.model_management.models import ModelNotFoundException, ModelType
from ...backend.util.devices import torch_dtype
from ..util.ti_utils import extract_ti_triggers_from_prompt
from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
@ -87,7 +87,7 @@ class CompelInvocation(BaseInvocation):
        # loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]

        ti_list = []
        for trigger in extract_ti_triggers_from_prompt(self.prompt):
        for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", self.prompt):
            name = trigger[1:-1]
            try:
                ti_list.append(
@ -210,7 +210,7 @@ class SDXLPromptInvocationBase:
        # loras = [(context.services.model_manager.get_model(**lora.dict(exclude={"weight"})).context.model, lora.weight) for lora in self.clip.loras]

        ti_list = []
        for trigger in extract_ti_triggers_from_prompt(prompt):
        for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt):
            name = trigger[1:-1]
            try:
                ti_list.append(
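This hunk swaps the extract_ti_triggers_from_prompt helper for an inline regex. A quick illustration of what that regex extracts from a prompt; the sample prompt and embedding names are made up.

```python
import re

prompt = "a portrait in the style of <my-embedding>, high detail <other_ti>"
triggers = re.findall(r"<[a-zA-Z0-9., _-]+>", prompt)
names = [t[1:-1] for t in triggers]  # strip the angle brackets, as the code above does
print(names)  # ['my-embedding', 'other_ti']
```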
@ -24,13 +24,11 @@ from controlnet_aux import (
|
||||
)
|
||||
from controlnet_aux.util import HWC3, ade_palette
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_validator
|
||||
|
||||
from invokeai.app.invocations.primitives import ImageField, ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
|
||||
from invokeai.app.shared.fields import FieldDescriptions
|
||||
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
|
||||
|
||||
from ...backend.model_management import BaseModelType
|
||||
from .baseinvocation import (
|
||||
@ -77,16 +75,17 @@ class ControlField(BaseModel):
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||
|
||||
@field_validator("control_weight")
|
||||
@classmethod
|
||||
def validate_control_weight(cls, v):
|
||||
validate_weights(v)
|
||||
"""Validate that all control weights in the valid range"""
|
||||
if isinstance(v, list):
|
||||
for i in v:
|
||||
if i < -1 or i > 2:
|
||||
raise ValueError("Control weights must be within -1 to 2 range")
|
||||
else:
|
||||
if v < -1 or v > 2:
|
||||
raise ValueError("Control weights must be within -1 to 2 range")
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self):
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
|
||||
@invocation_output("control_output")
|
||||
class ControlOutput(BaseInvocationOutput):
|
||||
@ -96,17 +95,17 @@ class ControlOutput(BaseInvocationOutput):
|
||||
control: ControlField = OutputField(description=FieldDescriptions.control)
|
||||
|
||||
|
||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.1")
|
||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.0")
|
||||
class ControlNetInvocation(BaseInvocation):
|
||||
"""Collects ControlNet info to pass to other nodes"""
|
||||
|
||||
image: ImageField = InputField(description="The control image")
|
||||
control_model: ControlNetModelField = InputField(description=FieldDescriptions.controlnet_model, input=Input.Direct)
|
||||
control_weight: Union[float, List[float]] = InputField(
|
||||
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
|
||||
default=1.0, description="The weight given to the ControlNet"
|
||||
)
|
||||
begin_step_percent: float = InputField(
|
||||
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
|
||||
default=0, ge=-1, le=2, description="When the ControlNet is first applied (% of total steps)"
|
||||
)
|
||||
end_step_percent: float = InputField(
|
||||
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
||||
@ -114,17 +113,6 @@ class ControlNetInvocation(BaseInvocation):
|
||||
control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")
|
||||
|
||||
@field_validator("control_weight")
|
||||
@classmethod
|
||||
def validate_control_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self) -> "ControlNetInvocation":
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ControlOutput:
|
||||
return ControlOutput(
|
||||
control=ControlField(
|
||||
@ -603,33 +591,3 @@ class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
|
||||
color_map = cv2.resize(color_map, (width, height), interpolation=cv2.INTER_NEAREST)
|
||||
color_map = Image.fromarray(color_map)
|
||||
return color_map
|
||||
|
||||
|
||||
DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
|
||||
|
||||
|
||||
@invocation(
|
||||
"depth_anything_image_processor",
|
||||
title="Depth Anything Processor",
|
||||
tags=["controlnet", "depth", "depth anything"],
|
||||
category="controlnet",
|
||||
version="1.0.0",
|
||||
)
|
||||
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a depth map based on the Depth Anything algorithm"""
|
||||
|
||||
model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
|
||||
default="small", description="The size of the depth model to use"
|
||||
)
|
||||
resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
|
||||
offload: bool = InputField(default=False)
|
||||
|
||||
def run_processor(self, image):
|
||||
depth_anything_detector = DepthAnythingDetector()
|
||||
depth_anything_detector.load_model(model_size=self.model_size)
|
||||
|
||||
if image.mode == "RGBA":
|
||||
image = image.convert("RGB")
|
||||
|
||||
processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
|
||||
return processed_image
|
||||
|
@ -13,15 +13,7 @@ from invokeai.app.shared.fields import FieldDescriptions
|
||||
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
|
||||
from invokeai.backend.image_util.safety_checker import SafetyChecker
|
||||
|
||||
from .baseinvocation import (
|
||||
BaseInvocation,
|
||||
Classification,
|
||||
Input,
|
||||
InputField,
|
||||
InvocationContext,
|
||||
WithMetadata,
|
||||
invocation,
|
||||
)
|
||||
from .baseinvocation import BaseInvocation, Input, InputField, InvocationContext, WithMetadata, invocation
|
||||
|
||||
|
||||
@invocation("show_image", title="Show Image", tags=["image"], category="image", version="1.0.0")
|
||||
@ -429,64 +421,6 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata):
|
||||
)
|
||||
|
||||
|
||||
@invocation(
|
||||
"unsharp_mask",
|
||||
title="Unsharp Mask",
|
||||
tags=["image", "unsharp_mask"],
|
||||
category="image",
|
||||
version="1.2.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class UnsharpMaskInvocation(BaseInvocation, WithMetadata):
|
||||
"""Applies an unsharp mask filter to an image"""
|
||||
|
||||
image: ImageField = InputField(description="The image to use")
|
||||
radius: float = InputField(gt=0, description="Unsharp mask radius", default=2)
|
||||
strength: float = InputField(ge=0, description="Unsharp mask strength", default=50)
|
||||
|
||||
def pil_from_array(self, arr):
|
||||
return Image.fromarray((arr * 255).astype("uint8"))
|
||||
|
||||
def array_from_pil(self, img):
|
||||
return numpy.array(img) / 255
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.services.images.get_pil_image(self.image.image_name)
|
||||
mode = image.mode
|
||||
|
||||
alpha_channel = image.getchannel("A") if mode == "RGBA" else None
|
||||
image = image.convert("RGB")
|
||||
image_blurred = self.array_from_pil(image.filter(ImageFilter.GaussianBlur(radius=self.radius)))
|
||||
|
||||
image = self.array_from_pil(image)
|
||||
image += (image - image_blurred) * (self.strength / 100.0)
|
||||
image = numpy.clip(image, 0, 1)
|
||||
image = self.pil_from_array(image)
|
||||
|
||||
image = image.convert(mode)
|
||||
|
||||
# Make the image RGBA if we had a source alpha channel
|
||||
if alpha_channel is not None:
|
||||
image.putalpha(alpha_channel)
|
||||
|
||||
image_dto = context.services.images.create(
|
||||
image=image,
|
||||
image_origin=ResourceOrigin.INTERNAL,
|
||||
image_category=ImageCategory.GENERAL,
|
||||
node_id=self.id,
|
||||
session_id=context.graph_execution_state_id,
|
||||
is_intermediate=self.is_intermediate,
|
||||
metadata=self.metadata,
|
||||
workflow=context.workflow,
|
||||
)
|
||||
|
||||
return ImageOutput(
|
||||
image=ImageField(image_name=image_dto.image_name),
|
||||
width=image.width,
|
||||
height=image.height,
|
||||
)
|
||||
|
||||
|
||||
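The unsharp-mask node above sharpens by adding back the difference between the image and its Gaussian blur, scaled by strength/100. Here is a standalone sketch of the same arithmetic outside the invocation machinery; the input and output file names are placeholders.

```python
import numpy
from PIL import Image, ImageFilter

def unsharp_mask(img: Image.Image, radius: float = 2.0, strength: float = 50.0) -> Image.Image:
    """Sharpen by adding the scaled difference between the image and its Gaussian blur."""
    rgb = img.convert("RGB")
    arr = numpy.array(rgb) / 255
    blurred = numpy.array(rgb.filter(ImageFilter.GaussianBlur(radius=radius))) / 255
    sharpened = numpy.clip(arr + (arr - blurred) * (strength / 100.0), 0, 1)
    return Image.fromarray((sharpened * 255).astype("uint8"))

result = unsharp_mask(Image.open("input.png"))
result.save("sharpened.png")
```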
PIL_RESAMPLING_MODES = Literal[
|
||||
"nearest",
|
||||
"box",
|
||||
|
@ -2,7 +2,7 @@ import os
|
||||
from builtins import float
|
||||
from typing import List, Union
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
@ -15,7 +15,6 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.shared.fields import FieldDescriptions
|
||||
from invokeai.backend.model_management.models.base import BaseModelType, ModelType
|
||||
from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id
|
||||
@ -40,6 +39,7 @@ class IPAdapterField(BaseModel):
|
||||
ip_adapter_model: IPAdapterModelField = Field(description="The IP-Adapter model to use.")
|
||||
image_encoder_model: CLIPVisionModelField = Field(description="The name of the CLIP image encoder model.")
|
||||
weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
|
||||
# weight: float = Field(default=1.0, ge=0, description="The weight of the IP-Adapter.")
|
||||
begin_step_percent: float = Field(
|
||||
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
||||
)
|
||||
@ -47,17 +47,6 @@ class IPAdapterField(BaseModel):
|
||||
default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
|
||||
)
|
||||
|
||||
@field_validator("weight")
|
||||
@classmethod
|
||||
def validate_ip_adapter_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self):
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
|
||||
@invocation_output("ip_adapter_output")
|
||||
class IPAdapterOutput(BaseInvocationOutput):
|
||||
@ -65,7 +54,7 @@ class IPAdapterOutput(BaseInvocationOutput):
|
||||
ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")
|
||||
|
||||
|
||||
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.1")
|
||||
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.0")
|
||||
class IPAdapterInvocation(BaseInvocation):
|
||||
"""Collects IP-Adapter info to pass to other nodes."""
|
||||
|
||||
@ -75,27 +64,18 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
description="The IP-Adapter model.", title="IP-Adapter Model", input=Input.Direct, ui_order=-1
|
||||
)
|
||||
|
||||
# weight: float = InputField(default=1.0, description="The weight of the IP-Adapter.", ui_type=UIType.Float)
|
||||
weight: Union[float, List[float]] = InputField(
|
||||
default=1, description="The weight given to the IP-Adapter", title="Weight"
|
||||
default=1, ge=-1, description="The weight given to the IP-Adapter", title="Weight"
|
||||
)
|
||||
|
||||
begin_step_percent: float = InputField(
|
||||
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
||||
default=0, ge=-1, le=2, description="When the IP-Adapter is first applied (% of total steps)"
|
||||
)
|
||||
end_step_percent: float = InputField(
|
||||
default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
|
||||
)
|
||||
|
||||
@field_validator("weight")
|
||||
@classmethod
|
||||
def validate_ip_adapter_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self):
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
def invoke(self, context: InvocationContext) -> IPAdapterOutput:
|
||||
# Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
|
||||
ip_adapter_info = context.services.model_manager.model_info(
|
||||
|
@ -1,6 +1,5 @@
|
||||
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
import math
|
||||
from contextlib import ExitStack
|
||||
from functools import singledispatchmethod
|
||||
from typing import List, Literal, Optional, Union
|
||||
@ -221,7 +220,7 @@ def get_scheduler(
|
||||
title="Denoise Latents",
|
||||
tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
|
||||
category="latents",
|
||||
version="1.5.1",
|
||||
version="1.5.0",
|
||||
)
|
||||
class DenoiseLatentsInvocation(BaseInvocation):
|
||||
"""Denoises noisy latents to decodable images"""
|
||||
@ -280,7 +279,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
ui_order=7,
|
||||
)
|
||||
cfg_rescale_multiplier: float = InputField(
|
||||
title="CFG Rescale Multiplier", default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
|
||||
default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
|
||||
)
|
||||
latents: Optional[LatentsField] = InputField(
|
||||
default=None,
|
||||
@ -1229,57 +1228,3 @@ class CropLatentsCoreInvocation(BaseInvocation):
|
||||
context.services.latents.save(name, cropped_latents)
|
||||
|
||||
return build_latents_output(latents_name=name, latents=cropped_latents)
|
||||
|
||||
|
||||
@invocation_output("ideal_size_output")
|
||||
class IdealSizeOutput(BaseInvocationOutput):
|
||||
"""Base class for invocations that output an image"""
|
||||
|
||||
width: int = OutputField(description="The ideal width of the image (in pixels)")
|
||||
height: int = OutputField(description="The ideal height of the image (in pixels)")
|
||||
|
||||
|
||||
@invocation(
|
||||
"ideal_size",
|
||||
title="Ideal Size",
|
||||
tags=["latents", "math", "ideal_size"],
|
||||
version="1.0.2",
|
||||
)
|
||||
class IdealSizeInvocation(BaseInvocation):
|
||||
"""Calculates the ideal size for generation to avoid duplication"""
|
||||
|
||||
width: int = InputField(default=1024, description="Final image width")
|
||||
height: int = InputField(default=576, description="Final image height")
|
||||
unet: UNetField = InputField(default=None, description=FieldDescriptions.unet)
|
||||
multiplier: float = InputField(
|
||||
default=1.0,
|
||||
description="Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large)",
|
||||
)
|
||||
|
||||
def trim_to_multiple_of(self, *args, multiple_of=LATENT_SCALE_FACTOR):
|
||||
return tuple((x - x % multiple_of) for x in args)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> IdealSizeOutput:
|
||||
aspect = self.width / self.height
|
||||
dimension = 512
|
||||
if self.unet.unet.base_model == BaseModelType.StableDiffusion2:
|
||||
dimension = 768
|
||||
elif self.unet.unet.base_model == BaseModelType.StableDiffusionXL:
|
||||
dimension = 1024
|
||||
dimension = dimension * self.multiplier
|
||||
min_dimension = math.floor(dimension * 0.5)
|
||||
model_area = dimension * dimension # hardcoded for now since all models are trained on square images
|
||||
|
||||
if aspect > 1.0:
|
||||
init_height = max(min_dimension, math.sqrt(model_area / aspect))
|
||||
init_width = init_height * aspect
|
||||
else:
|
||||
init_width = max(min_dimension, math.sqrt(model_area * aspect))
|
||||
init_height = init_width / aspect
|
||||
|
||||
scaled_width, scaled_height = self.trim_to_multiple_of(
|
||||
math.floor(init_width),
|
||||
math.floor(init_height),
|
||||
)
|
||||
|
||||
return IdealSizeOutput(width=scaled_width, height=scaled_height)
|
||||
|
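To make the ideal-size arithmetic above concrete, here is a worked sketch for a 1920x1080 (16:9) target with an SDXL-class model; the native dimension of 1024 and the LATENT_SCALE_FACTOR of 8 are assumptions standing in for values defined elsewhere in the codebase.

```python
import math

LATENT_SCALE_FACTOR = 8  # assumed: latents are 1/8 the pixel resolution

def trim_to_multiple_of(*args, multiple_of=LATENT_SCALE_FACTOR):
    return tuple((x - x % multiple_of) for x in args)

width, height, dimension, multiplier = 1920, 1080, 1024, 1.0
aspect = width / height                      # ~1.78
dimension *= multiplier
min_dimension = math.floor(dimension * 0.5)  # 512
model_area = dimension * dimension           # square training resolution assumed

if aspect > 1.0:
    init_height = max(min_dimension, math.sqrt(model_area / aspect))
    init_width = init_height * aspect
else:
    init_width = max(min_dimension, math.sqrt(model_area * aspect))
    init_height = init_width / aspect

print(trim_to_multiple_of(math.floor(init_width), math.floor(init_height)))  # (1360, 768)
```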
@ -1,6 +1,7 @@
|
||||
# Copyright (c) 2023 Borisov Sergey (https://github.com/StAlKeR7779)
|
||||
|
||||
import inspect
|
||||
import re
|
||||
|
||||
# from contextlib import ExitStack
|
||||
from typing import List, Literal, Union
|
||||
@ -20,7 +21,6 @@ from invokeai.backend import BaseModelType, ModelType, SubModelType
|
||||
from ...backend.model_management import ONNXModelPatcher
|
||||
from ...backend.stable_diffusion import PipelineIntermediateState
|
||||
from ...backend.util import choose_torch_device
|
||||
from ..util.ti_utils import extract_ti_triggers_from_prompt
|
||||
from .baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
@ -78,7 +78,7 @@ class ONNXPromptInvocation(BaseInvocation):
|
||||
]
|
||||
|
||||
ti_list = []
|
||||
for trigger in extract_ti_triggers_from_prompt(self.prompt):
|
||||
for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", self.prompt):
|
||||
name = trigger[1:-1]
|
||||
try:
|
||||
ti_list.append(
|
||||
|
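For context, the angle-bracket trigger syntax matched above can be exercised on its own. The prompt string below is made up for illustration; the regex is the one shown in the hunk.

```python
import re

# Textual-inversion triggers are tokens wrapped in angle brackets, as matched above.
prompt = "a photo of a cat <easynegative> <bad-hands-5>"  # hypothetical prompt
triggers = [match[1:-1] for match in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt)]
print(triggers)  # ['easynegative', 'bad-hands-5']
```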
@ -1,6 +1,6 @@
|
||||
from typing import Union
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
@ -14,7 +14,6 @@ from invokeai.app.invocations.baseinvocation import (
|
||||
)
|
||||
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.shared.fields import FieldDescriptions
|
||||
from invokeai.backend.model_management.models.base import BaseModelType
|
||||
|
||||
@ -38,17 +37,6 @@ class T2IAdapterField(BaseModel):
|
||||
)
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||
|
||||
@field_validator("weight")
|
||||
@classmethod
|
||||
def validate_ip_adapter_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self):
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
|
||||
@invocation_output("t2i_adapter_output")
|
||||
class T2IAdapterOutput(BaseInvocationOutput):
|
||||
@ -56,7 +44,7 @@ class T2IAdapterOutput(BaseInvocationOutput):
|
||||
|
||||
|
||||
@invocation(
|
||||
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.1"
|
||||
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.0"
|
||||
)
|
||||
class T2IAdapterInvocation(BaseInvocation):
|
||||
"""Collects T2I-Adapter info to pass to other nodes."""
|
||||
@ -73,7 +61,7 @@ class T2IAdapterInvocation(BaseInvocation):
|
||||
default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"
|
||||
)
|
||||
begin_step_percent: float = InputField(
|
||||
default=0, ge=0, le=1, description="When the T2I-Adapter is first applied (% of total steps)"
|
||||
default=0, ge=-1, le=2, description="When the T2I-Adapter is first applied (% of total steps)"
|
||||
)
|
||||
end_step_percent: float = InputField(
|
||||
default=1, ge=0, le=1, description="When the T2I-Adapter is last applied (% of total steps)"
|
||||
@ -83,17 +71,6 @@ class T2IAdapterInvocation(BaseInvocation):
|
||||
description="The resize mode applied to the T2I-Adapter input image so that it matches the target output size.",
|
||||
)
|
||||
|
||||
@field_validator("weight")
|
||||
@classmethod
|
||||
def validate_ip_adapter_weight(cls, v):
|
||||
validate_weights(v)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_begin_end_step_percent(self):
|
||||
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||
return self
|
||||
|
||||
def invoke(self, context: InvocationContext) -> T2IAdapterOutput:
|
||||
return T2IAdapterOutput(
|
||||
t2i_adapter=T2IAdapterField(
|
||||
|
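The validator pattern used by T2IAdapterInvocation above can be summarized with a small self-contained model. The class below is a hypothetical stand-in, not the real invocation, and it inlines a simplified copy of the shared weight check.

```python
from pydantic import BaseModel, field_validator, model_validator


def validate_weights(weights):  # simplified copy of the shared helper, for illustration only
    to_validate = weights if isinstance(weights, list) else [weights]
    if any(i < -1 or i > 2 for i in to_validate):
        raise ValueError("Control weights must be within -1 to 2 range")


class AdapterSettings(BaseModel):  # hypothetical stand-in for T2IAdapterInvocation
    weight: float = 1.0
    begin_step_percent: float = 0.0
    end_step_percent: float = 1.0

    @field_validator("weight")
    @classmethod
    def check_weight(cls, v: float) -> float:
        validate_weights(v)
        return v

    @model_validator(mode="after")
    def check_step_order(self) -> "AdapterSettings":
        if self.begin_step_percent >= self.end_step_percent:
            raise ValueError("Begin step percent must be less than end step percent")
        return self


AdapterSettings(weight=0.8, end_step_percent=0.6)  # passes both validators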
@ -38,14 +38,7 @@ class CalculateImageTilesOutput(BaseInvocationOutput):
|
||||
tiles: list[Tile] = OutputField(description="The tiles coordinates that cover a particular image shape.")
|
||||
|
||||
|
||||
@invocation(
|
||||
"calculate_image_tiles",
|
||||
title="Calculate Image Tiles",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
@invocation("calculate_image_tiles", title="Calculate Image Tiles", tags=["tiles"], category="tiles", version="1.0.0")
|
||||
class CalculateImageTilesInvocation(BaseInvocation):
|
||||
"""Calculate the coordinates and overlaps of tiles that cover a target image shape."""
|
||||
|
||||
@ -77,7 +70,7 @@ class CalculateImageTilesInvocation(BaseInvocation):
|
||||
title="Calculate Image Tiles Even Split",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.1.0",
|
||||
version="1.0.0",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
|
||||
@ -97,11 +90,11 @@ class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
|
||||
ge=1,
|
||||
description="Number of tiles to divide image into on the y axis",
|
||||
)
|
||||
overlap: int = InputField(
|
||||
default=128,
|
||||
overlap_fraction: float = InputField(
|
||||
default=0.25,
|
||||
ge=0,
|
||||
multiple_of=8,
|
||||
description="The overlap, in pixels, between adjacent tiles.",
|
||||
lt=1,
|
||||
description="Overlap between adjacent tiles as a fraction of the tile's dimensions (0-1)",
|
||||
)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput:
|
||||
@ -110,7 +103,7 @@ class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
|
||||
image_width=self.image_width,
|
||||
num_tiles_x=self.num_tiles_x,
|
||||
num_tiles_y=self.num_tiles_y,
|
||||
overlap=self.overlap,
|
||||
overlap_fraction=self.overlap_fraction,
|
||||
)
|
||||
return CalculateImageTilesOutput(tiles=tiles)
|
||||
|
||||
|
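As a rough illustration of what the new overlap_fraction field means in pixels, here is an assumed back-of-the-envelope conversion; the real calc_tiles_even_split() may round and clamp differently.

```python
def overlap_fraction_to_pixels(image_width: int, num_tiles_x: int, overlap_fraction: float) -> int:
    """Approximate pixel overlap between adjacent tiles for an even split along one axis."""
    nominal_tile_width = image_width // num_tiles_x
    return int(nominal_tile_width * overlap_fraction)


# A 1024px-wide image split into 2 tiles with overlap_fraction=0.25 gives roughly 128px of overlap.
print(overlap_fraction_to_pixels(1024, 2, 0.25))  # 128
```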
@ -1,14 +0,0 @@
from typing import Union


def validate_weights(weights: Union[float, list[float]]) -> None:
    """Validate that all control weights are in the valid range"""
    to_validate = weights if isinstance(weights, list) else [weights]
    if any(i < -1 or i > 2 for i in to_validate):
        raise ValueError("Control weights must be within -1 to 2 range")


def validate_begin_end_step(begin_step_percent: float, end_step_percent: float) -> None:
    """Validate that begin_step_percent is less than end_step_percent"""
    if begin_step_percent >= end_step_percent:
        raise ValueError("Begin step percent must be less than end step percent")
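A quick usage sketch of the two helpers above (shown being removed in this hunk); the import path is the one used elsewhere in this comparison.

```python
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights

validate_weights(0.75)              # a single weight inside [-1, 2] passes silently
validate_weights([0.5, 1.5])        # lists are checked element by element
validate_begin_end_step(0.0, 1.0)   # begin must come before end

try:
    validate_weights(2.5)
except ValueError as err:
    print(err)  # "Control weights must be within -1 to 2 range"
```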
@ -1,7 +1,5 @@
"""Init file for InvokeAI configure package."""

from invokeai.app.services.config.config_common import PagingArgumentParser

from .config_default import InvokeAIAppConfig, get_invokeai_config

__all__ = ["InvokeAIAppConfig", "get_invokeai_config", "PagingArgumentParser"]
__all__ = ["InvokeAIAppConfig", "get_invokeai_config"]
@ -209,7 +209,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
"""Configuration object for InvokeAI App."""
|
||||
|
||||
singleton_config: ClassVar[Optional[InvokeAIAppConfig]] = None
|
||||
singleton_init: ClassVar[Optional[Dict[str, Any]]] = None
|
||||
singleton_init: ClassVar[Optional[Dict]] = None
|
||||
|
||||
# fmt: off
|
||||
type: Literal["InvokeAI"] = "InvokeAI"
|
||||
@ -263,7 +263,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
|
||||
# DEVICE
|
||||
device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
|
||||
precision : Literal["auto", "float16", "bfloat16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)
|
||||
precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)
|
||||
|
||||
# GENERATION
|
||||
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation)
|
||||
@ -301,8 +301,8 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
self,
|
||||
argv: Optional[list[str]] = None,
|
||||
conf: Optional[DictConfig] = None,
|
||||
clobber: Optional[bool] = False,
|
||||
) -> None:
|
||||
clobber=False,
|
||||
):
|
||||
"""
|
||||
Update settings with contents of init file, environment, and command-line settings.
|
||||
|
||||
@ -337,7 +337,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_config(cls, **kwargs: Any) -> InvokeAIAppConfig:
|
||||
def get_config(cls, **kwargs: Dict[str, Any]) -> InvokeAIAppConfig:
|
||||
"""Return a singleton InvokeAIAppConfig configuration object."""
|
||||
if (
|
||||
cls.singleton_config is None
|
||||
@ -356,7 +356,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
else:
|
||||
root = self.find_root().expanduser().absolute()
|
||||
self.root = root # insulate ourselves from relative paths that may change
|
||||
return root.resolve()
|
||||
return root
|
||||
|
||||
@property
|
||||
def root_dir(self) -> Path:
|
||||
@ -455,7 +455,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
||||
return _find_root()
|
||||
|
||||
|
||||
def get_invokeai_config(**kwargs: Any) -> InvokeAIAppConfig:
|
||||
def get_invokeai_config(**kwargs) -> InvokeAIAppConfig:
|
||||
"""Legacy function which returns InvokeAIAppConfig.get_config()."""
|
||||
return InvokeAIAppConfig.get_config(**kwargs)
|
||||
|
||||
|
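For orientation, the singleton accessors changed in this hunk are typically used as in the sketch below; the exact call sites are assumptions, but get_config(), parse_args(), precision, and root_dir all appear in the diff above.

```python
from invokeai.app.services.config import InvokeAIAppConfig

config = InvokeAIAppConfig.get_config()  # process-wide singleton
config.parse_args([])                    # merge init file, environment and CLI settings
print(config.precision, config.root_dir)
```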
@ -1,12 +0,0 @@
"""Init file for download queue."""
from .download_base import DownloadJob, DownloadJobStatus, DownloadQueueServiceBase, UnknownJobIDException
from .download_default import DownloadQueueService, TqdmProgress

__all__ = [
    "DownloadJob",
    "DownloadQueueServiceBase",
    "DownloadQueueService",
    "TqdmProgress",
    "DownloadJobStatus",
    "UnknownJobIDException",
]
@ -1,262 +0,0 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
|
||||
"""Model download service."""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from functools import total_ordering
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field, PrivateAttr
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
|
||||
|
||||
class DownloadJobStatus(str, Enum):
|
||||
"""State of a download job."""
|
||||
|
||||
WAITING = "waiting" # not enqueued, will not run
|
||||
RUNNING = "running" # actively downloading
|
||||
COMPLETED = "completed" # finished running
|
||||
CANCELLED = "cancelled" # user cancelled
|
||||
ERROR = "error" # terminated with an error message
|
||||
|
||||
|
||||
class DownloadJobCancelledException(Exception):
|
||||
"""This exception is raised when a download job is cancelled."""
|
||||
|
||||
|
||||
class UnknownJobIDException(Exception):
|
||||
"""This exception is raised when an invalid job id is referened."""
|
||||
|
||||
|
||||
class ServiceInactiveException(Exception):
|
||||
"""This exception is raised when user attempts to initiate a download before the service is started."""
|
||||
|
||||
|
||||
DownloadEventHandler = Callable[["DownloadJob"], None]
|
||||
DownloadExceptionHandler = Callable[["DownloadJob", Optional[Exception]], None]
|
||||
|
||||
|
||||
@total_ordering
|
||||
class DownloadJob(BaseModel):
|
||||
"""Class to monitor and control a model download request."""
|
||||
|
||||
# required variables to be passed in on creation
|
||||
source: AnyHttpUrl = Field(description="Where to download from. Specific types specified in child classes.")
|
||||
dest: Path = Field(description="Destination of downloaded model on local disk; a directory or file path")
|
||||
access_token: Optional[str] = Field(default=None, description="authorization token for protected resources")
|
||||
# automatically assigned on creation
|
||||
id: int = Field(description="Numeric ID of this job", default=-1) # default id is a sentinel
|
||||
priority: int = Field(default=10, description="Queue priority; lower values are higher priority")
|
||||
|
||||
# set internally during download process
|
||||
status: DownloadJobStatus = Field(default=DownloadJobStatus.WAITING, description="Status of the download")
|
||||
download_path: Optional[Path] = Field(default=None, description="Final location of downloaded file")
|
||||
job_started: Optional[str] = Field(default=None, description="Timestamp for when the download job started")
|
||||
job_ended: Optional[str] = Field(
|
||||
default=None, description="Timestamp for when the download job ende1d (completed or errored)"
|
||||
)
|
||||
content_type: Optional[str] = Field(default=None, description="Content type of downloaded file")
|
||||
bytes: int = Field(default=0, description="Bytes downloaded so far")
|
||||
total_bytes: int = Field(default=0, description="Total file size (bytes)")
|
||||
|
||||
# set when an error occurs
|
||||
error_type: Optional[str] = Field(default=None, description="Name of exception that caused an error")
|
||||
error: Optional[str] = Field(default=None, description="Traceback of the exception that caused an error")
|
||||
|
||||
# internal flag
|
||||
_cancelled: bool = PrivateAttr(default=False)
|
||||
|
||||
# optional event handlers passed in on creation
|
||||
_on_start: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
||||
_on_progress: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
||||
_on_complete: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
||||
_on_cancelled: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
||||
_on_error: Optional[DownloadExceptionHandler] = PrivateAttr(default=None)
|
||||
|
||||
def __hash__(self) -> int:
|
||||
"""Return hash of the string representation of this object, for indexing."""
|
||||
return hash(str(self))
|
||||
|
||||
def __le__(self, other: "DownloadJob") -> bool:
|
||||
"""Return True if this job's priority is less than another's."""
|
||||
return self.priority <= other.priority
|
||||
|
||||
def cancel(self) -> None:
|
||||
"""Call to cancel the job."""
|
||||
self._cancelled = True
|
||||
|
||||
# cancelled and the callbacks are private attributes in order to prevent
|
||||
# them from being serialized and/or used in the Json Schema
|
||||
@property
|
||||
def cancelled(self) -> bool:
|
||||
"""Call to cancel the job."""
|
||||
return self._cancelled
|
||||
|
||||
@property
|
||||
def complete(self) -> bool:
|
||||
"""Return true if job completed without errors."""
|
||||
return self.status == DownloadJobStatus.COMPLETED
|
||||
|
||||
@property
|
||||
def running(self) -> bool:
|
||||
"""Return true if the job is running."""
|
||||
return self.status == DownloadJobStatus.RUNNING
|
||||
|
||||
@property
|
||||
def errored(self) -> bool:
|
||||
"""Return true if the job is errored."""
|
||||
return self.status == DownloadJobStatus.ERROR
|
||||
|
||||
@property
|
||||
def in_terminal_state(self) -> bool:
|
||||
"""Return true if job has finished, one way or another."""
|
||||
return self.status not in [DownloadJobStatus.WAITING, DownloadJobStatus.RUNNING]
|
||||
|
||||
@property
|
||||
def on_start(self) -> Optional[DownloadEventHandler]:
|
||||
"""Return the on_start event handler."""
|
||||
return self._on_start
|
||||
|
||||
@property
|
||||
def on_progress(self) -> Optional[DownloadEventHandler]:
|
||||
"""Return the on_progress event handler."""
|
||||
return self._on_progress
|
||||
|
||||
@property
|
||||
def on_complete(self) -> Optional[DownloadEventHandler]:
|
||||
"""Return the on_complete event handler."""
|
||||
return self._on_complete
|
||||
|
||||
@property
|
||||
def on_error(self) -> Optional[DownloadExceptionHandler]:
|
||||
"""Return the on_error event handler."""
|
||||
return self._on_error
|
||||
|
||||
@property
|
||||
def on_cancelled(self) -> Optional[DownloadEventHandler]:
|
||||
"""Return the on_cancelled event handler."""
|
||||
return self._on_cancelled
|
||||
|
||||
def set_callbacks(
|
||||
self,
|
||||
on_start: Optional[DownloadEventHandler] = None,
|
||||
on_progress: Optional[DownloadEventHandler] = None,
|
||||
on_complete: Optional[DownloadEventHandler] = None,
|
||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||
on_error: Optional[DownloadExceptionHandler] = None,
|
||||
) -> None:
|
||||
"""Set the callbacks for download events."""
|
||||
self._on_start = on_start
|
||||
self._on_progress = on_progress
|
||||
self._on_complete = on_complete
|
||||
self._on_error = on_error
|
||||
self._on_cancelled = on_cancelled
|
||||
|
||||
|
||||
class DownloadQueueServiceBase(ABC):
|
||||
"""Multithreaded queue for downloading models via URL."""
|
||||
|
||||
@abstractmethod
|
||||
def start(self, *args: Any, **kwargs: Any) -> None:
|
||||
"""Start the download worker threads."""
|
||||
|
||||
@abstractmethod
|
||||
def stop(self, *args: Any, **kwargs: Any) -> None:
|
||||
"""Stop the download worker threads."""
|
||||
|
||||
@abstractmethod
|
||||
def download(
|
||||
self,
|
||||
source: AnyHttpUrl,
|
||||
dest: Path,
|
||||
priority: int = 10,
|
||||
access_token: Optional[str] = None,
|
||||
on_start: Optional[DownloadEventHandler] = None,
|
||||
on_progress: Optional[DownloadEventHandler] = None,
|
||||
on_complete: Optional[DownloadEventHandler] = None,
|
||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||
on_error: Optional[DownloadExceptionHandler] = None,
|
||||
) -> DownloadJob:
|
||||
"""
|
||||
Create and enqueue download job.
|
||||
|
||||
:param source: Source of the download as a URL.
|
||||
:param dest: Path to download to. See below.
|
||||
:param on_start, on_progress, on_complete, on_error: Callbacks for the indicated
|
||||
events.
|
||||
:returns: A DownloadJob object for monitoring the state of the download.
|
||||
|
||||
The `dest` argument is a Path object. Its behavior is:
|
||||
|
||||
1. If the path exists and is a directory, then the URL contents will be downloaded
|
||||
into that directory using the filename indicated in the response's `Content-Disposition` field.
|
||||
If no content-disposition is present, then the last component of the URL will be used (similar to
|
||||
wget's behavior).
|
||||
2. If the path does not exist, then it is taken as the name of a new file to create with the downloaded
|
||||
content.
|
||||
3. If the path exists and is an existing file, then the downloader will try to resume the download from
|
||||
the end of the existing file.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def submit_download_job(
|
||||
self,
|
||||
job: DownloadJob,
|
||||
on_start: Optional[DownloadEventHandler] = None,
|
||||
on_progress: Optional[DownloadEventHandler] = None,
|
||||
on_complete: Optional[DownloadEventHandler] = None,
|
||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||
on_error: Optional[DownloadExceptionHandler] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Enqueue a download job.
|
||||
|
||||
:param job: The DownloadJob
|
||||
:param on_start, on_progress, on_complete, on_error: Callbacks for the indicated
|
||||
events.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def list_jobs(self) -> List[DownloadJob]:
|
||||
"""
|
||||
List active download jobs.
|
||||
|
||||
:returns List[DownloadJob]: List of download jobs whose state is not "completed."
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def id_to_job(self, id: int) -> DownloadJob:
|
||||
"""
|
||||
Return the DownloadJob corresponding to the integer ID.
|
||||
|
||||
:param id: ID of the DownloadJob.
|
||||
|
||||
Exceptions:
|
||||
* UnknownJobIDException
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cancel_all_jobs(self) -> None:
|
||||
"""Cancel all active and enquedjobs."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def prune_jobs(self) -> None:
|
||||
"""Prune completed and errored queue items from the job list."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cancel_job(self, job: DownloadJob) -> None:
|
||||
"""Cancel the job, clearing partial downloads and putting it into ERROR state."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def join(self) -> None:
|
||||
"""Wait until all jobs are off the queue."""
|
||||
pass
|
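A hedged sketch of how the queue API described above is meant to be driven. The URL and destination are placeholders, and the import path is the package removed elsewhere in this comparison.

```python
from pathlib import Path

from invokeai.app.services.download import DownloadJob, DownloadQueueService

queue = DownloadQueueService(max_parallel_dl=2)
queue.start()


def report(job: DownloadJob) -> None:
    print(f"{job.source} -> {job.download_path} ({job.bytes}/{job.total_bytes} bytes)")


job = queue.download(
    source="https://example.com/model.safetensors",  # placeholder URL
    dest=Path("/tmp/models"),  # existing directory: filename comes from Content-Disposition or the URL
    on_complete=report,
)
queue.join()  # block until every queued job reaches a terminal state
queue.stop()
```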
@ -1,437 +0,0 @@
|
||||
# Copyright (c) 2023, Lincoln D. Stein
|
||||
"""Implementation of multithreaded download queue for invokeai."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import threading
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
from queue import Empty, PriorityQueue
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import requests
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
from requests import HTTPError
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
from invokeai.app.util.misc import get_iso_timestamp
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
from .download_base import (
|
||||
DownloadEventHandler,
|
||||
DownloadExceptionHandler,
|
||||
DownloadJob,
|
||||
DownloadJobCancelledException,
|
||||
DownloadJobStatus,
|
||||
DownloadQueueServiceBase,
|
||||
ServiceInactiveException,
|
||||
UnknownJobIDException,
|
||||
)
|
||||
|
||||
# Maximum number of bytes to download during each call to requests.iter_content()
|
||||
DOWNLOAD_CHUNK_SIZE = 100000
|
||||
|
||||
|
||||
class DownloadQueueService(DownloadQueueServiceBase):
|
||||
"""Class for queued download of models."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
max_parallel_dl: int = 5,
|
||||
event_bus: Optional[EventServiceBase] = None,
|
||||
requests_session: Optional[requests.sessions.Session] = None,
|
||||
):
|
||||
"""
|
||||
Initialize DownloadQueue.
|
||||
|
||||
:param max_parallel_dl: Number of simultaneous downloads allowed [5].
|
||||
:param requests_session: Optional requests.sessions.Session object, for unit tests.
|
||||
"""
|
||||
self._jobs = {}
|
||||
self._next_job_id = 0
|
||||
self._queue = PriorityQueue()
|
||||
self._stop_event = threading.Event()
|
||||
self._worker_pool = set()
|
||||
self._lock = threading.Lock()
|
||||
self._logger = InvokeAILogger.get_logger("DownloadQueueService")
|
||||
self._event_bus = event_bus
|
||||
self._requests = requests_session or requests.Session()
|
||||
self._accept_download_requests = False
|
||||
self._max_parallel_dl = max_parallel_dl
|
||||
|
||||
def start(self, *args: Any, **kwargs: Any) -> None:
|
||||
"""Start the download worker threads."""
|
||||
with self._lock:
|
||||
if self._worker_pool:
|
||||
raise Exception("Attempt to start the download service twice")
|
||||
self._stop_event.clear()
|
||||
self._start_workers(self._max_parallel_dl)
|
||||
self._accept_download_requests = True
|
||||
|
||||
def stop(self, *args: Any, **kwargs: Any) -> None:
|
||||
"""Stop the download worker threads."""
|
||||
with self._lock:
|
||||
if not self._worker_pool:
|
||||
raise Exception("Attempt to stop the download service before it was started")
|
||||
self._accept_download_requests = False # reject attempts to add new jobs to queue
|
||||
queued_jobs = [x for x in self.list_jobs() if x.status == DownloadJobStatus.WAITING]
|
||||
active_jobs = [x for x in self.list_jobs() if x.status == DownloadJobStatus.RUNNING]
|
||||
if queued_jobs:
|
||||
self._logger.warning(f"Cancelling {len(queued_jobs)} queued downloads")
|
||||
if active_jobs:
|
||||
self._logger.info(f"Waiting for {len(active_jobs)} active download jobs to complete")
|
||||
with self._queue.mutex:
|
||||
self._queue.queue.clear()
|
||||
self.join() # wait for all active jobs to finish
|
||||
self._stop_event.set()
|
||||
self._worker_pool.clear()
|
||||
|
||||
def submit_download_job(
|
||||
self,
|
||||
job: DownloadJob,
|
||||
on_start: Optional[DownloadEventHandler] = None,
|
||||
on_progress: Optional[DownloadEventHandler] = None,
|
||||
on_complete: Optional[DownloadEventHandler] = None,
|
||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||
on_error: Optional[DownloadExceptionHandler] = None,
|
||||
) -> None:
|
||||
"""Enqueue a download job."""
|
||||
if not self._accept_download_requests:
|
||||
raise ServiceInactiveException(
|
||||
"The download service is not currently accepting requests. Please call start() to initialize the service."
|
||||
)
|
||||
with self._lock:
|
||||
job.id = self._next_job_id
|
||||
self._next_job_id += 1
|
||||
job.set_callbacks(
|
||||
on_start=on_start,
|
||||
on_progress=on_progress,
|
||||
on_complete=on_complete,
|
||||
on_cancelled=on_cancelled,
|
||||
on_error=on_error,
|
||||
)
|
||||
self._jobs[job.id] = job
|
||||
self._queue.put(job)
|
||||
|
||||
def download(
|
||||
self,
|
||||
source: AnyHttpUrl,
|
||||
dest: Path,
|
||||
priority: int = 10,
|
||||
access_token: Optional[str] = None,
|
||||
on_start: Optional[DownloadEventHandler] = None,
|
||||
on_progress: Optional[DownloadEventHandler] = None,
|
||||
on_complete: Optional[DownloadEventHandler] = None,
|
||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||
on_error: Optional[DownloadExceptionHandler] = None,
|
||||
) -> DownloadJob:
|
||||
"""Create and enqueue a download job and return it."""
|
||||
if not self._accept_download_requests:
|
||||
raise ServiceInactiveException(
|
||||
"The download service is not currently accepting requests. Please call start() to initialize the service."
|
||||
)
|
||||
job = DownloadJob(
|
||||
source=source,
|
||||
dest=dest,
|
||||
priority=priority,
|
||||
access_token=access_token,
|
||||
)
|
||||
self.submit_download_job(
|
||||
job,
|
||||
on_start=on_start,
|
||||
on_progress=on_progress,
|
||||
on_complete=on_complete,
|
||||
on_cancelled=on_cancelled,
|
||||
on_error=on_error,
|
||||
)
|
||||
return job
|
||||
|
||||
def join(self) -> None:
|
||||
"""Wait for all jobs to complete."""
|
||||
self._queue.join()
|
||||
|
||||
def list_jobs(self) -> List[DownloadJob]:
|
||||
"""List all the jobs."""
|
||||
return list(self._jobs.values())
|
||||
|
||||
def prune_jobs(self) -> None:
|
||||
"""Prune completed and errored queue items from the job list."""
|
||||
with self._lock:
|
||||
to_delete = set()
|
||||
for job_id, job in self._jobs.items():
|
||||
if job.in_terminal_state:
|
||||
to_delete.add(job_id)
|
||||
for job_id in to_delete:
|
||||
del self._jobs[job_id]
|
||||
|
||||
def id_to_job(self, id: int) -> DownloadJob:
|
||||
"""Translate a job ID into a DownloadJob object."""
|
||||
try:
|
||||
return self._jobs[id]
|
||||
except KeyError as excp:
|
||||
raise UnknownJobIDException("Unrecognized job") from excp
|
||||
|
||||
def cancel_job(self, job: DownloadJob) -> None:
|
||||
"""
|
||||
Cancel the indicated job.
|
||||
|
||||
If it is running it will be stopped.
|
||||
job.status will be set to DownloadJobStatus.CANCELLED
|
||||
"""
|
||||
with self._lock:
|
||||
job.cancel()
|
||||
|
||||
def cancel_all_jobs(self) -> None:
|
||||
"""Cancel all jobs (those not in enqueued, running or paused state)."""
|
||||
for job in self._jobs.values():
|
||||
if not job.in_terminal_state:
|
||||
self.cancel_job(job)
|
||||
|
||||
def _start_workers(self, max_workers: int) -> None:
|
||||
"""Start the requested number of worker threads."""
|
||||
self._stop_event.clear()
|
||||
for i in range(0, max_workers): # noqa B007
|
||||
worker = threading.Thread(target=self._download_next_item, daemon=True)
|
||||
self._logger.debug(f"Download queue worker thread {worker.name} starting.")
|
||||
worker.start()
|
||||
self._worker_pool.add(worker)
|
||||
|
||||
def _download_next_item(self) -> None:
|
||||
"""Worker thread gets next job on priority queue."""
|
||||
done = False
|
||||
while not done:
|
||||
if self._stop_event.is_set():
|
||||
done = True
|
||||
continue
|
||||
try:
|
||||
job = self._queue.get(timeout=1)
|
||||
except Empty:
|
||||
continue
|
||||
|
||||
try:
|
||||
job.job_started = get_iso_timestamp()
|
||||
self._do_download(job)
|
||||
self._signal_job_complete(job)
|
||||
|
||||
except (OSError, HTTPError) as excp:
|
||||
job.error_type = excp.__class__.__name__ + f"({str(excp)})"
|
||||
job.error = traceback.format_exc()
|
||||
self._signal_job_error(job, excp)
|
||||
except DownloadJobCancelledException:
|
||||
self._signal_job_cancelled(job)
|
||||
self._cleanup_cancelled_job(job)
|
||||
|
||||
finally:
|
||||
job.job_ended = get_iso_timestamp()
|
||||
self._queue.task_done()
|
||||
self._logger.debug(f"Download queue worker thread {threading.current_thread().name} exiting.")
|
||||
|
||||
def _do_download(self, job: DownloadJob) -> None:
|
||||
"""Do the actual download."""
|
||||
url = job.source
|
||||
header = {"Authorization": f"Bearer {job.access_token}"} if job.access_token else {}
|
||||
open_mode = "wb"
|
||||
|
||||
# Make a streaming request. This will retrieve headers including
|
||||
# content-length and content-disposition, but not fetch any content itself
|
||||
resp = self._requests.get(str(url), headers=header, stream=True)
|
||||
if not resp.ok:
|
||||
raise HTTPError(resp.reason)
|
||||
|
||||
job.content_type = resp.headers.get("Content-Type")
|
||||
content_length = int(resp.headers.get("content-length", 0))
|
||||
job.total_bytes = content_length
|
||||
|
||||
if job.dest.is_dir():
|
||||
file_name = os.path.basename(str(url.path)) # default is to use the last bit of the URL
|
||||
|
||||
if match := re.search('filename="(.+)"', resp.headers.get("Content-Disposition", "")):
|
||||
remote_name = match.group(1)
|
||||
if self._validate_filename(job.dest.as_posix(), remote_name):
|
||||
file_name = remote_name
|
||||
|
||||
job.download_path = job.dest / file_name
|
||||
|
||||
else:
|
||||
job.dest.parent.mkdir(parents=True, exist_ok=True)
|
||||
job.download_path = job.dest
|
||||
|
||||
assert job.download_path
|
||||
|
||||
# Don't clobber an existing file. See commit 82c2c85202f88c6d24ff84710f297cfc6ae174af
|
||||
# for code that instead resumes an interrupted download.
|
||||
if job.download_path.exists():
|
||||
raise OSError(f"[Errno 17] File {job.download_path} exists")
|
||||
|
||||
# append ".downloading" to the path
|
||||
in_progress_path = self._in_progress_path(job.download_path)
|
||||
|
||||
# signal caller that the download is starting. At this point, key fields such as
|
||||
# download_path and total_bytes will be populated. We call it here because the caller might
|
||||
# discover that the local file is already complete and generate a COMPLETED status.
|
||||
self._signal_job_started(job)
|
||||
|
||||
# "range not satisfiable" - local file is at least as large as the remote file
|
||||
if resp.status_code == 416 or (content_length > 0 and job.bytes >= content_length):
|
||||
self._logger.warning(f"{job.download_path}: complete file found. Skipping.")
|
||||
return
|
||||
|
||||
# "partial content" - local file is smaller than remote file
|
||||
elif resp.status_code == 206 or job.bytes > 0:
|
||||
self._logger.warning(f"{job.download_path}: partial file found. Resuming")
|
||||
|
||||
# some other error
|
||||
elif resp.status_code != 200:
|
||||
raise HTTPError(resp.reason)
|
||||
|
||||
self._logger.debug(f"{job.source}: Downloading {job.download_path}")
|
||||
report_delta = job.total_bytes / 100 # report every 1% change
|
||||
last_report_bytes = 0
|
||||
|
||||
# DOWNLOAD LOOP
|
||||
with open(in_progress_path, open_mode) as file:
|
||||
for data in resp.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
|
||||
if job.cancelled:
|
||||
raise DownloadJobCancelledException("Job was cancelled at caller's request")
|
||||
|
||||
job.bytes += file.write(data)
|
||||
if (job.bytes - last_report_bytes >= report_delta) or (job.bytes >= job.total_bytes):
|
||||
last_report_bytes = job.bytes
|
||||
self._signal_job_progress(job)
|
||||
|
||||
# if we get here we are done and can rename the file to the original dest
|
||||
self._logger.debug(f"{job.source}: saved to {job.download_path} (bytes={job.bytes})")
|
||||
in_progress_path.rename(job.download_path)
|
||||
|
||||
def _validate_filename(self, directory: str, filename: str) -> bool:
|
||||
pc_name_max = os.pathconf(directory, "PC_NAME_MAX") if hasattr(os, "pathconf") else 260 # hardcoded for windows
|
||||
pc_path_max = (
|
||||
os.pathconf(directory, "PC_PATH_MAX") if hasattr(os, "pathconf") else 32767
|
||||
) # hardcoded for windows with long names enabled
|
||||
if "/" in filename:
|
||||
return False
|
||||
if filename.startswith(".."):
|
||||
return False
|
||||
if len(filename) > pc_name_max:
|
||||
return False
|
||||
if len(os.path.join(directory, filename)) > pc_path_max:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _in_progress_path(self, path: Path) -> Path:
|
||||
return path.with_name(path.name + ".downloading")
|
||||
|
||||
def _signal_job_started(self, job: DownloadJob) -> None:
|
||||
job.status = DownloadJobStatus.RUNNING
|
||||
if job.on_start:
|
||||
try:
|
||||
job.on_start(job)
|
||||
except Exception as e:
|
||||
self._logger.error(
|
||||
f"An error occurred while processing the on_start callback: {traceback.format_exception(e)}"
|
||||
)
|
||||
if self._event_bus:
|
||||
assert job.download_path
|
||||
self._event_bus.emit_download_started(str(job.source), job.download_path.as_posix())
|
||||
|
||||
def _signal_job_progress(self, job: DownloadJob) -> None:
|
||||
if job.on_progress:
|
||||
try:
|
||||
job.on_progress(job)
|
||||
except Exception as e:
|
||||
self._logger.error(
|
||||
f"An error occurred while processing the on_progress callback: {traceback.format_exception(e)}"
|
||||
)
|
||||
if self._event_bus:
|
||||
assert job.download_path
|
||||
self._event_bus.emit_download_progress(
|
||||
str(job.source),
|
||||
download_path=job.download_path.as_posix(),
|
||||
current_bytes=job.bytes,
|
||||
total_bytes=job.total_bytes,
|
||||
)
|
||||
|
||||
def _signal_job_complete(self, job: DownloadJob) -> None:
|
||||
job.status = DownloadJobStatus.COMPLETED
|
||||
if job.on_complete:
|
||||
try:
|
||||
job.on_complete(job)
|
||||
except Exception as e:
|
||||
self._logger.error(
|
||||
f"An error occurred while processing the on_complete callback: {traceback.format_exception(e)}"
|
||||
)
|
||||
if self._event_bus:
|
||||
assert job.download_path
|
||||
self._event_bus.emit_download_complete(
|
||||
str(job.source), download_path=job.download_path.as_posix(), total_bytes=job.total_bytes
|
||||
)
|
||||
|
||||
def _signal_job_cancelled(self, job: DownloadJob) -> None:
|
||||
if job.status not in [DownloadJobStatus.RUNNING, DownloadJobStatus.WAITING]:
|
||||
return
|
||||
job.status = DownloadJobStatus.CANCELLED
|
||||
if job.on_cancelled:
|
||||
try:
|
||||
job.on_cancelled(job)
|
||||
except Exception as e:
|
||||
self._logger.error(
|
||||
f"An error occurred while processing the on_cancelled callback: {traceback.format_exception(e)}"
|
||||
)
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_download_cancelled(str(job.source))
|
||||
|
||||
def _signal_job_error(self, job: DownloadJob, excp: Optional[Exception] = None) -> None:
|
||||
job.status = DownloadJobStatus.ERROR
|
||||
self._logger.error(f"{str(job.source)}: {traceback.format_exception(excp)}")
|
||||
if job.on_error:
|
||||
try:
|
||||
job.on_error(job, excp)
|
||||
except Exception as e:
|
||||
self._logger.error(
|
||||
f"An error occurred while processing the on_error callback: {traceback.format_exception(e)}"
|
||||
)
|
||||
if self._event_bus:
|
||||
assert job.error_type
|
||||
assert job.error
|
||||
self._event_bus.emit_download_error(str(job.source), error_type=job.error_type, error=job.error)
|
||||
|
||||
def _cleanup_cancelled_job(self, job: DownloadJob) -> None:
|
||||
self._logger.debug(f"Cleaning up leftover files from cancelled download job {job.download_path}")
|
||||
try:
|
||||
if job.download_path:
|
||||
partial_file = self._in_progress_path(job.download_path)
|
||||
partial_file.unlink()
|
||||
except OSError as excp:
|
||||
self._logger.warning(excp)
|
||||
|
||||
|
||||
# Example on_progress event handler to display a TQDM status bar
|
||||
# Activate with:
|
||||
# download_service.download('http://foo.bar/baz', '/tmp', on_progress=TqdmProgress().update)
|
||||
class TqdmProgress(object):
|
||||
"""TQDM-based progress bar object to use in on_progress handlers."""
|
||||
|
||||
_bars: Dict[int, tqdm] # the tqdm object
|
||||
_last: Dict[int, int] # last bytes downloaded
|
||||
|
||||
def __init__(self) -> None: # noqa D107
|
||||
self._bars = {}
|
||||
self._last = {}
|
||||
|
||||
def update(self, job: DownloadJob) -> None: # noqa D102
|
||||
job_id = job.id
|
||||
# new job
|
||||
if job_id not in self._bars:
|
||||
assert job.download_path
|
||||
dest = Path(job.download_path).name
|
||||
self._bars[job_id] = tqdm(
|
||||
desc=dest,
|
||||
initial=0,
|
||||
total=job.total_bytes,
|
||||
unit="iB",
|
||||
unit_scale=True,
|
||||
)
|
||||
self._last[job_id] = 0
|
||||
self._bars[job_id].update(job.bytes - self._last[job_id])
|
||||
self._last[job_id] = job.bytes
|
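Following the comment above, a progress bar can be attached by passing the bound update method as the on_progress callback. The snippet below is illustrative only; the URL and destination are placeholders.

```python
from pathlib import Path

from invokeai.app.services.download import DownloadQueueService, TqdmProgress

queue = DownloadQueueService()
queue.start()
queue.download(
    source="https://example.com/model.safetensors",  # placeholder URL
    dest=Path("/tmp"),
    on_progress=TqdmProgress().update,  # bound method renders a tqdm bar per job
)
queue.join()
queue.stop()
```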
@ -1,7 +1,7 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
from typing import Any, Optional
|
||||
|
||||
from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage
|
||||
from invokeai.app.services.session_queue.session_queue_common import (
|
||||
@ -17,7 +17,6 @@ from invokeai.backend.model_management.models.base import BaseModelType, ModelTy
|
||||
|
||||
class EventServiceBase:
|
||||
queue_event: str = "queue_event"
|
||||
download_event: str = "download_event"
|
||||
model_event: str = "model_event"
|
||||
|
||||
"""Basic event bus, to have an empty stand-in when not needed"""
|
||||
@ -33,13 +32,6 @@ class EventServiceBase:
|
||||
payload={"event": event_name, "data": payload},
|
||||
)
|
||||
|
||||
def __emit_download_event(self, event_name: str, payload: dict) -> None:
|
||||
payload["timestamp"] = get_timestamp()
|
||||
self.dispatch(
|
||||
event_name=EventServiceBase.download_event,
|
||||
payload={"event": event_name, "data": payload},
|
||||
)
|
||||
|
||||
def __emit_model_event(self, event_name: str, payload: dict) -> None:
|
||||
payload["timestamp"] = get_timestamp()
|
||||
self.dispatch(
|
||||
@ -331,145 +323,53 @@ class EventServiceBase:
|
||||
payload={"queue_id": queue_id},
|
||||
)
|
||||
|
||||
def emit_download_started(self, source: str, download_path: str) -> None:
|
||||
def emit_model_install_started(self, source: str) -> None:
|
||||
"""
|
||||
Emit when a download job is started.
|
||||
|
||||
:param source: Source URL of the download
:param download_path: Local path of the file being downloaded
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_started",
|
||||
payload={"source": source, "download_path": download_path},
|
||||
)
|
||||
|
||||
def emit_download_progress(self, source: str, download_path: str, current_bytes: int, total_bytes: int) -> None:
|
||||
"""
|
||||
Emit "download_progress" events at regular intervals during a download job.
|
||||
|
||||
:param source: The downloaded source
|
||||
:param download_path: The local downloaded file
|
||||
:param current_bytes: Number of bytes downloaded so far
|
||||
:param total_bytes: The size of the file being downloaded (if known)
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_progress",
|
||||
payload={
|
||||
"source": source,
|
||||
"download_path": download_path,
|
||||
"current_bytes": current_bytes,
|
||||
"total_bytes": total_bytes,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_download_complete(self, source: str, download_path: str, total_bytes: int) -> None:
|
||||
"""
|
||||
Emit a "download_complete" event at the end of a successful download.
|
||||
|
||||
:param source: Source URL
|
||||
:param download_path: Path to the locally downloaded file
|
||||
:param total_bytes: The size of the downloaded file
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_complete",
|
||||
payload={
|
||||
"source": source,
|
||||
"download_path": download_path,
|
||||
"total_bytes": total_bytes,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_download_cancelled(self, source: str) -> None:
|
||||
"""Emit a "download_cancelled" event in the event that the download was cancelled by user."""
|
||||
self.__emit_download_event(
|
||||
event_name="download_cancelled",
|
||||
payload={
|
||||
"source": source,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_download_error(self, source: str, error_type: str, error: str) -> None:
|
||||
"""
|
||||
Emit a "download_error" event when an download job encounters an exception.
|
||||
|
||||
:param source: Source URL
|
||||
:param error_type: The name of the exception that raised the error
|
||||
:param error: The traceback from this error
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_error",
|
||||
payload={
|
||||
"source": source,
|
||||
"error_type": error_type,
|
||||
"error": error,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_model_install_downloading(
|
||||
self,
|
||||
source: str,
|
||||
local_path: str,
|
||||
bytes: int,
|
||||
total_bytes: int,
|
||||
parts: List[Dict[str, Union[str, int]]],
|
||||
) -> None:
|
||||
"""
|
||||
Emit at intervals while the install job is in progress (remote models only).
|
||||
|
||||
:param source: Source of the model
|
||||
:param local_path: Where model is downloading to
|
||||
:param parts: Progress of downloading URLs that comprise the model, if any.
|
||||
:param bytes: Number of bytes downloaded so far.
|
||||
:param total_bytes: Total size of download, including all files.
|
||||
This emits a Dict with keys "source", "local_path", "bytes" and "total_bytes".
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_downloading",
|
||||
payload={
|
||||
"source": source,
|
||||
"local_path": local_path,
|
||||
"bytes": bytes,
|
||||
"total_bytes": total_bytes,
|
||||
"parts": parts,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_model_install_running(self, source: str) -> None:
|
||||
"""
|
||||
Emit once when an install job becomes active.
|
||||
Emitted when an install job is started.
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_running",
|
||||
event_name="model_install_started",
|
||||
payload={"source": source},
|
||||
)
|
||||
|
||||
def emit_model_install_completed(self, source: str, key: str, total_bytes: Optional[int] = None) -> None:
|
||||
def emit_model_install_completed(self, source: str, key: str) -> None:
|
||||
"""
|
||||
Emit when an install job is completed successfully.
|
||||
Emitted when an install job is completed successfully.
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
:param key: Model config record key
|
||||
:param total_bytes: Size of the model (may be None for installation of a local path)
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_completed",
|
||||
payload={
|
||||
"source": source,
|
||||
"total_bytes": total_bytes,
|
||||
"key": key,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_model_install_cancelled(self, source: str) -> None:
|
||||
def emit_model_install_progress(
|
||||
self,
|
||||
source: str,
|
||||
current_bytes: int,
|
||||
total_bytes: int,
|
||||
) -> None:
|
||||
"""
|
||||
Emit when an install job is cancelled.
|
||||
Emitted while the install job is in progress.
|
||||
(Downloaded models only)
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
:param source: Source of the model
|
||||
:param current_bytes: Number of bytes downloaded so far
|
||||
:param total_bytes: Total bytes to download
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_cancelled",
|
||||
payload={"source": source},
|
||||
event_name="model_install_progress",
|
||||
payload={
|
||||
"source": source,
|
||||
"current_bytes": int,
|
||||
"total_bytes": int,
|
||||
},
|
||||
)
|
||||
|
||||
def emit_model_install_error(
|
||||
@ -479,11 +379,10 @@ class EventServiceBase:
|
||||
error: str,
|
||||
) -> None:
|
||||
"""
|
||||
Emit when an install job encounters an exception.
|
||||
Emitted when an install job encounters an exception.
|
||||
|
||||
:param source: Source of the model
|
||||
:param error_type: The name of the exception
|
||||
:param error: A text description of the exception
|
||||
:param exception: The exception that raised the error
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_error",
|
||||
|
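To show how the emitters above reach a consumer, here is a toy event service that simply prints what it is asked to dispatch. Application subclasses of EventServiceBase override dispatch() in a similar way, but this class and its output are only an illustration.

```python
from invokeai.app.services.events.events_base import EventServiceBase


class PrintingEventService(EventServiceBase):
    """Toy bus: print every event instead of forwarding it to a websocket."""

    def dispatch(self, event_name: str, payload: dict) -> None:
        print(event_name, payload["event"], payload["data"])


bus = PrintingEventService()
bus.emit_download_started("https://example.com/model.bin", "/tmp/model.bin")
# prints: download_event download_started {'source': ..., 'download_path': ..., 'timestamp': ...}
```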
@ -132,6 +132,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
||||
source_node_id=source_node_id,
|
||||
result=outputs.model_dump(),
|
||||
)
|
||||
self.__invoker.services.performance_statistics.log_stats()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
@ -194,7 +195,6 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
||||
error=traceback.format_exc(),
|
||||
)
|
||||
elif is_complete:
|
||||
self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
|
||||
self.__invoker.services.events.emit_graph_execution_complete(
|
||||
queue_batch_id=queue_item.session_queue_batch_id,
|
||||
queue_item_id=queue_item.session_queue_item_id,
|
||||
|
@ -11,7 +11,6 @@ if TYPE_CHECKING:
|
||||
from .board_records.board_records_base import BoardRecordStorageBase
|
||||
from .boards.boards_base import BoardServiceABC
|
||||
from .config import InvokeAIAppConfig
|
||||
from .download import DownloadQueueServiceBase
|
||||
from .events.events_base import EventServiceBase
|
||||
from .image_files.image_files_base import ImageFileStorageBase
|
||||
from .image_records.image_records_base import ImageRecordStorageBase
|
||||
@ -28,7 +27,7 @@ if TYPE_CHECKING:
|
||||
from .names.names_base import NameServiceBase
|
||||
from .session_processor.session_processor_base import SessionProcessorBase
|
||||
from .session_queue.session_queue_base import SessionQueueBase
|
||||
from .shared.graph import GraphExecutionState
|
||||
from .shared.graph import GraphExecutionState, LibraryGraph
|
||||
from .urls.urls_base import UrlServiceBase
|
||||
from .workflow_records.workflow_records_base import WorkflowRecordsStorageBase
|
||||
|
||||
@ -44,6 +43,7 @@ class InvocationServices:
|
||||
configuration: "InvokeAIAppConfig"
|
||||
events: "EventServiceBase"
|
||||
graph_execution_manager: "ItemStorageABC[GraphExecutionState]"
|
||||
graph_library: "ItemStorageABC[LibraryGraph]"
|
||||
images: "ImageServiceABC"
|
||||
image_records: "ImageRecordStorageBase"
|
||||
image_files: "ImageFileStorageBase"
|
||||
@ -51,7 +51,6 @@ class InvocationServices:
|
||||
logger: "Logger"
|
||||
model_manager: "ModelManagerServiceBase"
|
||||
model_records: "ModelRecordServiceBase"
|
||||
download_queue: "DownloadQueueServiceBase"
|
||||
model_install: "ModelInstallServiceBase"
|
||||
processor: "InvocationProcessorABC"
|
||||
performance_statistics: "InvocationStatsServiceBase"
|
||||
@ -72,6 +71,7 @@ class InvocationServices:
|
||||
configuration: "InvokeAIAppConfig",
|
||||
events: "EventServiceBase",
|
||||
graph_execution_manager: "ItemStorageABC[GraphExecutionState]",
|
||||
graph_library: "ItemStorageABC[LibraryGraph]",
|
||||
images: "ImageServiceABC",
|
||||
image_files: "ImageFileStorageBase",
|
||||
image_records: "ImageRecordStorageBase",
|
||||
@ -79,7 +79,6 @@ class InvocationServices:
|
||||
logger: "Logger",
|
||||
model_manager: "ModelManagerServiceBase",
|
||||
model_records: "ModelRecordServiceBase",
|
||||
download_queue: "DownloadQueueServiceBase",
|
||||
model_install: "ModelInstallServiceBase",
|
||||
processor: "InvocationProcessorABC",
|
||||
performance_statistics: "InvocationStatsServiceBase",
|
||||
@ -98,6 +97,7 @@ class InvocationServices:
|
||||
self.configuration = configuration
|
||||
self.events = events
|
||||
self.graph_execution_manager = graph_execution_manager
|
||||
self.graph_library = graph_library
|
||||
self.images = images
|
||||
self.image_files = image_files
|
||||
self.image_records = image_records
|
||||
@ -105,7 +105,6 @@ class InvocationServices:
|
||||
self.logger = logger
|
||||
self.model_manager = model_manager
|
||||
self.model_records = model_records
|
||||
self.download_queue = download_queue
|
||||
self.model_install = model_install
|
||||
self.processor = processor
|
||||
self.performance_statistics = performance_statistics
|
||||
|
@ -30,13 +30,23 @@ writes to the system log is stored in InvocationServices.performance_statistics.
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from contextlib import AbstractContextManager
|
||||
from typing import Dict
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||
from invokeai.backend.model_management.model_cache import CacheStats
|
||||
|
||||
from .invocation_stats_common import NodeLog
|
||||
|
||||
|
||||
class InvocationStatsServiceBase(ABC):
|
||||
"Abstract base class for recording node memory/time performance statistics"
|
||||
|
||||
# {graph_id => NodeLog}
|
||||
_stats: Dict[str, NodeLog]
|
||||
_cache_stats: Dict[str, CacheStats]
|
||||
ram_used: float
|
||||
ram_changed: float
|
||||
|
||||
@abstractmethod
|
||||
def __init__(self):
|
||||
"""
|
||||
@ -67,8 +77,45 @@ class InvocationStatsServiceBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def log_stats(self, graph_execution_state_id: str):
|
||||
def reset_all_stats(self):
|
||||
"""Zero all statistics"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def update_invocation_stats(
|
||||
self,
|
||||
graph_id: str,
|
||||
invocation_type: str,
|
||||
time_used: float,
|
||||
vram_used: float,
|
||||
):
|
||||
"""
|
||||
Add timing information on execution of a node. Usually
|
||||
used internally.
|
||||
:param graph_id: ID of the graph that is currently executing
|
||||
:param invocation_type: String literal type of the node
|
||||
:param time_used: Time used by node's execution (sec)
|
||||
:param vram_used: Maximum VRAM used during execution (GB)
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def log_stats(self):
|
||||
"""
|
||||
Write out the accumulated statistics to the log or somewhere else.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def update_mem_stats(
|
||||
self,
|
||||
ram_used: float,
|
||||
ram_changed: float,
|
||||
):
|
||||
"""
|
||||
Update the collector with RAM memory usage info.
|
||||
|
||||
:param ram_used: How much RAM is currently in use.
|
||||
:param ram_changed: How much RAM changed since last generation.
|
||||
"""
|
||||
pass
|
||||
|
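The abstract interface above is easiest to read alongside a toy version of the collect_stats() usage pattern. The class below is a stand-in written for this comparison, not the real service, which also records RAM via psutil and peak VRAM via torch.cuda.

```python
import time
from contextlib import contextmanager


class ToyStatsService:
    """Minimal sketch of the collect_stats() contract: time a block, then report it."""

    def __init__(self) -> None:
        self._times: dict[str, float] = {}

    @contextmanager
    def collect_stats(self, invocation_type: str, graph_id: str):
        start = time.time()
        try:
            yield
        finally:
            self._times[f"{graph_id}/{invocation_type}"] = time.time() - start

    def log_stats(self) -> None:
        for key, seconds in self._times.items():
            print(f"{key}: {seconds:.3f}s")


stats = ToyStatsService()
with stats.collect_stats("denoise_latents", "graph-123"):
    time.sleep(0.1)  # stands in for invocation.invoke(context)
stats.log_stats()
```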
@ -1,84 +1,25 @@
|
||||
from collections import defaultdict
|
||||
from dataclasses import dataclass
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict
|
||||
|
||||
# size of GIG in bytes
|
||||
GIG = 1073741824
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeExecutionStats:
|
||||
"""Class for tracking execution stats of an invocation node."""
|
||||
class NodeStats:
|
||||
"""Class for tracking execution stats of an invocation node"""
|
||||
|
||||
invocation_type: str
|
||||
|
||||
start_time: float # Seconds since the epoch.
|
||||
end_time: float # Seconds since the epoch.
|
||||
|
||||
start_ram_gb: float # GB
|
||||
end_ram_gb: float # GB
|
||||
|
||||
peak_vram_gb: float # GB
|
||||
|
||||
def total_time(self) -> float:
|
||||
return self.end_time - self.start_time
|
||||
calls: int = 0
|
||||
time_used: float = 0.0 # seconds
|
||||
max_vram: float = 0.0 # GB
|
||||
cache_hits: int = 0
|
||||
cache_misses: int = 0
|
||||
cache_high_watermark: int = 0
|
||||
|
||||
|
||||
class GraphExecutionStats:
|
||||
"""Class for tracking execution stats of a graph."""
|
||||
@dataclass
|
||||
class NodeLog:
|
||||
"""Class for tracking node usage"""
|
||||
|
||||
def __init__(self):
|
||||
self._node_stats_list: list[NodeExecutionStats] = []
|
||||
|
||||
def add_node_execution_stats(self, node_stats: NodeExecutionStats):
|
||||
self._node_stats_list.append(node_stats)
|
||||
|
||||
def get_total_run_time(self) -> float:
|
||||
"""Get the total time spent executing nodes in the graph."""
|
||||
total = 0.0
|
||||
for node_stats in self._node_stats_list:
|
||||
total += node_stats.total_time()
|
||||
return total
|
||||
|
||||
def get_first_node_stats(self) -> NodeExecutionStats | None:
|
||||
"""Get the stats of the first node in the graph (by start_time)."""
|
||||
first_node = None
|
||||
for node_stats in self._node_stats_list:
|
||||
if first_node is None or node_stats.start_time < first_node.start_time:
|
||||
first_node = node_stats
|
||||
|
||||
assert first_node is not None
|
||||
return first_node
|
||||
|
||||
def get_last_node_stats(self) -> NodeExecutionStats | None:
|
||||
"""Get the stats of the last node in the graph (by end_time)."""
|
||||
last_node = None
|
||||
for node_stats in self._node_stats_list:
|
||||
if last_node is None or node_stats.end_time > last_node.end_time:
|
||||
last_node = node_stats
|
||||
|
||||
return last_node
|
||||
|
||||
def get_pretty_log(self, graph_execution_state_id: str) -> str:
|
||||
log = f"Graph stats: {graph_execution_state_id}\n"
|
||||
log += f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}\n"
|
||||
|
||||
# Log stats aggregated by node type.
|
||||
node_stats_by_type: dict[str, list[NodeExecutionStats]] = defaultdict(list)
|
||||
for node_stats in self._node_stats_list:
|
||||
node_stats_by_type[node_stats.invocation_type].append(node_stats)
|
||||
|
||||
for node_type, node_type_stats_list in node_stats_by_type.items():
|
||||
num_calls = len(node_type_stats_list)
|
||||
time_used = sum([n.total_time() for n in node_type_stats_list])
|
||||
peak_vram = max([n.peak_vram_gb for n in node_type_stats_list])
|
||||
log += f"{node_type:>30} {num_calls:>4} {time_used:7.3f}s {peak_vram:4.3f}G\n"
|
||||
|
||||
# Log stats for the entire graph.
|
||||
log += f"TOTAL GRAPH EXECUTION TIME: {self.get_total_run_time():7.3f}s\n"
|
||||
|
||||
first_node = self.get_first_node_stats()
|
||||
last_node = self.get_last_node_stats()
|
||||
if first_node is not None and last_node is not None:
|
||||
total_wall_time = last_node.end_time - first_node.start_time
|
||||
ram_change = last_node.end_ram_gb - first_node.start_ram_gb
|
||||
log += f"TOTAL GRAPH WALL TIME: {total_wall_time:7.3f}s\n"
|
||||
log += f"RAM used by InvokeAI process: {last_node.end_ram_gb:4.2f}G ({ram_change:+5.3f}G)\n"
|
||||
|
||||
return log
|
||||
# {node_type => NodeStats}
|
||||
nodes: Dict[str, NodeStats] = field(default_factory=dict)
|
||||
|
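A small, runnable illustration of the stats classes shown in this hunk; the module path in the import is assumed from the repository layout, and the numbers are invented.

```python
import time

from invokeai.app.services.invocation_stats.invocation_stats_common import (  # assumed module path
    GraphExecutionStats,
    NodeExecutionStats,
)

# Record one node execution and print the aggregate report.
stats = GraphExecutionStats()
now = time.time()
stats.add_node_execution_stats(
    NodeExecutionStats(
        invocation_type="denoise_latents",
        start_time=now,
        end_time=now + 2.5,
        start_ram_gb=6.0,
        end_ram_gb=6.4,
        peak_vram_gb=3.2,
    )
)
print(stats.get_total_run_time())         # 2.5
print(stats.get_pretty_log("graph-123"))  # per-node-type table plus graph totals
```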
@ -1,5 +1,5 @@
import time
from contextlib import contextmanager
from typing import Dict

import psutil
import torch
@ -7,119 +7,161 @@ import torch
import invokeai.backend.util.logging as logger
from invokeai.app.invocations.baseinvocation import BaseInvocation
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
from invokeai.backend.model_management.model_cache import CacheStats

from .invocation_stats_base import InvocationStatsServiceBase
from .invocation_stats_common import GraphExecutionStats, NodeExecutionStats

# Size of 1GB in bytes.
GB = 2**30
from .invocation_stats_common import GIG, NodeLog, NodeStats

class InvocationStatsService(InvocationStatsServiceBase):
"""Accumulate performance information about a running graph. Collects time spent in each node,
as well as the maximum and current VRAM utilisation for CUDA systems"""

_invoker: Invoker

def __init__(self):
# Maps graph_execution_state_id to GraphExecutionStats.
self._stats: dict[str, GraphExecutionStats] = {}
# Maps graph_execution_state_id to model manager CacheStats.
self._cache_stats: dict[str, CacheStats] = {}
# {graph_id => NodeLog}
self._stats: Dict[str, NodeLog] = {}
self._cache_stats: Dict[str, CacheStats] = {}
self.ram_used: float = 0.0
self.ram_changed: float = 0.0

def start(self, invoker: Invoker) -> None:
self._invoker = invoker

@contextmanager
def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str):
if not self._stats.get(graph_execution_state_id):
# First time we're seeing this graph_execution_state_id.
self._stats[graph_execution_state_id] = GraphExecutionStats()
self._cache_stats[graph_execution_state_id] = CacheStats()
class StatsContext:
"""Context manager for collecting statistics."""

# Prune stale stats. There should be none since we're starting a new graph, but just in case.
self._prune_stale_stats()
invocation: BaseInvocation
collector: "InvocationStatsServiceBase"
graph_id: str
start_time: float
ram_used: int
model_manager: ModelManagerServiceBase

# Record state before the invocation.
start_time = time.time()
start_ram = psutil.Process().memory_info().rss
if torch.cuda.is_available():
torch.cuda.reset_peak_memory_stats()
if self._invoker.services.model_manager:
self._invoker.services.model_manager.collect_cache_stats(self._cache_stats[graph_execution_state_id])
def __init__(
self,
invocation: BaseInvocation,
graph_id: str,
model_manager: ModelManagerServiceBase,
collector: "InvocationStatsServiceBase",
):
"""Initialize statistics for this run."""
self.invocation = invocation
self.collector = collector
self.graph_id = graph_id
self.start_time = 0.0
self.ram_used = 0
self.model_manager = model_manager

try:
# Let the invocation run.
yield None
finally:
# Record state after the invocation.
node_stats = NodeExecutionStats(
invocation_type=invocation.type,
start_time=start_time,
end_time=time.time(),
start_ram_gb=start_ram / GB,
end_ram_gb=psutil.Process().memory_info().rss / GB,
peak_vram_gb=torch.cuda.max_memory_allocated() / GB if torch.cuda.is_available() else 0.0,
def __enter__(self):
self.start_time = time.time()
if torch.cuda.is_available():
torch.cuda.reset_peak_memory_stats()
self.ram_used = psutil.Process().memory_info().rss
if self.model_manager:
self.model_manager.collect_cache_stats(self.collector._cache_stats[self.graph_id])

def __exit__(self, *args):
"""Called on exit from the context."""
ram_used = psutil.Process().memory_info().rss
self.collector.update_mem_stats(
ram_used=ram_used / GIG,
ram_changed=(ram_used - self.ram_used) / GIG,
)
self.collector.update_invocation_stats(
graph_id=self.graph_id,
invocation_type=self.invocation.type, # type: ignore # `type` is not on the `BaseInvocation` model, but *is* on all invocations
time_used=time.time() - self.start_time,
vram_used=torch.cuda.max_memory_allocated() / GIG if torch.cuda.is_available() else 0.0,
)
self._stats[graph_execution_state_id].add_node_execution_stats(node_stats)

def _prune_stale_stats(self):
"""Check all graphs being tracked and prune any that have completed/errored.
def collect_stats(
self,
invocation: BaseInvocation,
graph_execution_state_id: str,
) -> StatsContext:
if not self._stats.get(graph_execution_state_id): # first time we're seeing this
self._stats[graph_execution_state_id] = NodeLog()
self._cache_stats[graph_execution_state_id] = CacheStats()
return self.StatsContext(invocation, graph_execution_state_id, self._invoker.services.model_manager, self)

This shouldn't be necessary, but we don't have totally robust upstream handling of graph completions/errors, so
for now we call this function periodically to prevent them from accumulating.
"""
to_prune = []
for graph_execution_state_id in self._stats:
def reset_all_stats(self):
"""Zero all statistics"""
self._stats = {}

def reset_stats(self, graph_execution_id: str):
try:
self._stats.pop(graph_execution_id)
except KeyError:
logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_id}")

def update_mem_stats(
self,
ram_used: float,
ram_changed: float,
):
self.ram_used = ram_used
self.ram_changed = ram_changed

def update_invocation_stats(
self,
graph_id: str,
invocation_type: str,
time_used: float,
vram_used: float,
):
if not self._stats[graph_id].nodes.get(invocation_type):
self._stats[graph_id].nodes[invocation_type] = NodeStats()
stats = self._stats[graph_id].nodes[invocation_type]
stats.calls += 1
stats.time_used += time_used
stats.max_vram = max(stats.max_vram, vram_used)

def log_stats(self):
completed = set()
errored = set()
for graph_id, _node_log in self._stats.items():
try:
graph_execution_state = self._invoker.services.graph_execution_manager.get(graph_execution_state_id)
current_graph_state = self._invoker.services.graph_execution_manager.get(graph_id)
except Exception:
# TODO(ryand): What would cause this? Should this exception just be allowed to propagate?
logger.warning(f"Failed to get graph state for {graph_execution_state_id}.")
errored.add(graph_id)
continue

if not graph_execution_state.is_complete():
# The graph is still running, don't prune it.
if not current_graph_state.is_complete():
continue

to_prune.append(graph_execution_state_id)
total_time = 0
logger.info(f"Graph stats: {graph_id}")
logger.info(f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}")
for node_type, stats in self._stats[graph_id].nodes.items():
logger.info(f"{node_type:>30} {stats.calls:>4} {stats.time_used:7.3f}s {stats.max_vram:4.3f}G")
total_time += stats.time_used

for graph_execution_state_id in to_prune:
del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id]
cache_stats = self._cache_stats[graph_id]
hwm = cache_stats.high_watermark / GIG
tot = cache_stats.cache_size / GIG
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GIG

if len(to_prune) > 0:
logger.info(f"Pruned stale graph stats for {to_prune}.")
logger.info(f"TOTAL GRAPH EXECUTION TIME: {total_time:7.3f}s")
logger.info("RAM used by InvokeAI process: " + "%4.2fG" % self.ram_used + f" ({self.ram_changed:+5.3f}G)")
logger.info(f"RAM used to load models: {loaded:4.2f}G")
if torch.cuda.is_available():
logger.info("VRAM in use: " + "%4.3fG" % (torch.cuda.memory_allocated() / GIG))
logger.info("RAM cache statistics:")
logger.info(f" Model cache hits: {cache_stats.hits}")
logger.info(f" Model cache misses: {cache_stats.misses}")
logger.info(f" Models cached: {cache_stats.in_cache}")
logger.info(f" Models cleared from cache: {cache_stats.cleared}")
logger.info(f" Cache high water mark: {hwm:4.2f}/{tot:4.2f}G")

def reset_stats(self, graph_execution_state_id: str):
try:
del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id]
except KeyError as e:
logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}.")
completed.add(graph_id)

def log_stats(self, graph_execution_state_id: str):
try:
graph_stats = self._stats[graph_execution_state_id]
cache_stats = self._cache_stats[graph_execution_state_id]
except KeyError as e:
logger.warning(f"Attempted to log statistics for unknown graph {graph_execution_state_id}: {e}.")
return
for graph_id in completed:
del self._stats[graph_id]
del self._cache_stats[graph_id]

log = graph_stats.get_pretty_log(graph_execution_state_id)

hwm = cache_stats.high_watermark / GB
tot = cache_stats.cache_size / GB
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GB
log += f"RAM used to load models: {loaded:4.2f}G\n"
if torch.cuda.is_available():
log += f"VRAM in use: {(torch.cuda.memory_allocated() / GB):4.3f}G\n"
log += "RAM cache statistics:\n"
log += f" Model cache hits: {cache_stats.hits}\n"
log += f" Model cache misses: {cache_stats.misses}\n"
log += f" Models cached: {cache_stats.in_cache}\n"
log += f" Models cleared from cache: {cache_stats.cleared}\n"
log += f" Cache high water mark: {hwm:4.2f}/{tot:4.2f}G\n"
logger.info(log)

del self._stats[graph_execution_state_id]
del self._cache_stats[graph_execution_state_id]
for graph_id in errored:
del self._stats[graph_id]
del self._cache_stats[graph_id]
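Taken together, the new API replaces the StatsContext object with a @contextmanager method. A minimal sketch of how a caller might drive it, assuming an already-constructed service and invoker (the names below are illustrative, not taken from this diff):

# Hypothetical caller of the new context-manager API shown above.
stats = InvocationStatsService()
stats.start(invoker)  # invoker supplied by the application at startup

with stats.collect_stats(invocation, graph_execution_state_id):
    invocation.invoke(context)  # node runs; time, RAM and peak VRAM are recorded on exit

# After the graph completes, log and discard its accumulated stats:
stats.log_stats(graph_execution_state_id)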
@ -1,7 +1,6 @@
"""Initialization file for model install service package."""

from .model_install_base import (
CivitaiModelSource,
HFModelSource,
InstallStatus,
LocalModelSource,
@ -23,5 +22,4 @@ __all__ = [
"LocalModelSource",
"HFModelSource",
"URLModelSource",
"CivitaiModelSource",
]
@ -1,42 +1,28 @@
# Copyright 2023 Lincoln D. Stein and the InvokeAI development team
"""Baseclass definitions for the model installer."""

import re
import traceback
from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Set, Union
from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import BaseModel, Field, PrivateAttr, field_validator
from pydantic import BaseModel, Field, field_validator
from pydantic.networks import AnyHttpUrl
from typing_extensions import Annotated

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase
from invokeai.app.services.events import EventServiceBase
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_records import ModelRecordServiceBase
from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore
from invokeai.backend.model_manager import AnyModelConfig

class InstallStatus(str, Enum):
"""State of an install job running in the background."""

WAITING = "waiting" # waiting to be dequeued
DOWNLOADING = "downloading" # downloading of model files in process
RUNNING = "running" # being processed
COMPLETED = "completed" # finished running
ERROR = "error" # terminated with an error message
CANCELLED = "cancelled" # terminated with an error message

class ModelInstallPart(BaseModel):
url: AnyHttpUrl
path: Path
bytes: int = 0
total_bytes: int = 0

class UnknownInstallJobException(Exception):
@ -89,31 +75,12 @@ class LocalModelSource(StringLikeSource):
return Path(self.path).as_posix()

class CivitaiModelSource(StringLikeSource):
"""A Civitai version id, with optional variant and access token."""

version_id: int
variant: Optional[ModelRepoVariant] = None
access_token: Optional[str] = None
type: Literal["civitai"] = "civitai"

def __str__(self) -> str:
"""Return string version of repoid when string rep needed."""
base: str = str(self.version_id)
base += f" ({self.variant})" if self.variant else ""
return base

class HFModelSource(StringLikeSource):
"""
A HuggingFace repo_id with optional variant, sub-folder and access token.
Note that the variant option, if not provided to the constructor, will default to fp16, which is
what people (almost) always want.
"""
"""A HuggingFace repo_id, with optional variant and sub-folder."""

repo_id: str
variant: Optional[ModelRepoVariant] = ModelRepoVariant.FP16
subfolder: Optional[Path] = None
variant: Optional[str] = None
subfolder: Optional[str | Path] = None
access_token: Optional[str] = None
type: Literal["hf"] = "hf"

@ -137,22 +104,19 @@ class URLModelSource(StringLikeSource):

url: AnyHttpUrl
access_token: Optional[str] = None
type: Literal["url"] = "url"
type: Literal["generic_url"] = "generic_url"

def __str__(self) -> str:
"""Return string version of the url when string rep needed."""
return str(self.url)

ModelSource = Annotated[
Union[LocalModelSource, HFModelSource, CivitaiModelSource, URLModelSource], Field(discriminator="type")
]
ModelSource = Annotated[Union[LocalModelSource, HFModelSource, URLModelSource], Field(discriminator="type")]

class ModelInstallJob(BaseModel):
"""Object that tracks the current status of an install request."""

id: int = Field(description="Unique ID for this job")
status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
config_in: Dict[str, Any] = Field(
default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
@ -165,74 +129,15 @@ class ModelInstallJob(BaseModel):
)
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
bytes: Optional[int] = Field(
default=None, description="For a remote model, the number of bytes downloaded so far (may not be available)"
)
total_bytes: int = Field(default=0, description="Total size of the model to be installed")
source_metadata: Optional[AnyModelRepoMetadata] = Field(
default=None, description="Metadata provided by the model source"
)
download_parts: Set[DownloadJob] = Field(
default_factory=set, description="Download jobs contributing to this install"
)
# internal flags and transitory settings
_install_tmpdir: Optional[Path] = PrivateAttr(default=None)
_exception: Optional[Exception] = PrivateAttr(default=None)
error_type: Optional[str] = Field(default=None, description="Class name of the exception that led to status==ERROR")
error: Optional[str] = Field(default=None, description="Error traceback") # noqa #501

def set_error(self, e: Exception) -> None:
"""Record the error and traceback from an exception."""
self._exception = e
self.error_type = e.__class__.__name__
self.error = "".join(traceback.format_exception(e))
self.status = InstallStatus.ERROR

def cancel(self) -> None:
"""Call to cancel the job."""
self.status = InstallStatus.CANCELLED

@property
def error_type(self) -> Optional[str]:
"""Class name of the exception that led to status==ERROR."""
return self._exception.__class__.__name__ if self._exception else None

@property
def error(self) -> Optional[str]:
"""Error traceback."""
return "".join(traceback.format_exception(self._exception)) if self._exception else None

@property
def cancelled(self) -> bool:
"""Set status to CANCELLED."""
return self.status == InstallStatus.CANCELLED

@property
def errored(self) -> bool:
"""Return true if job has errored."""
return self.status == InstallStatus.ERROR

@property
def waiting(self) -> bool:
"""Return true if job is waiting to run."""
return self.status == InstallStatus.WAITING

@property
def downloading(self) -> bool:
"""Return true if job is downloading."""
return self.status == InstallStatus.DOWNLOADING

@property
def running(self) -> bool:
"""Return true if job is running."""
return self.status == InstallStatus.RUNNING

@property
def complete(self) -> bool:
"""Return true if job completed without errors."""
return self.status == InstallStatus.COMPLETED

@property
def in_terminal_state(self) -> bool:
"""Return true if job is in a terminal state."""
return self.status in [InstallStatus.COMPLETED, InstallStatus.ERROR, InstallStatus.CANCELLED]

class ModelInstallServiceBase(ABC):
"""Abstract base class for InvokeAI model installation."""
@ -242,8 +147,6 @@ class ModelInstallServiceBase(ABC):
self,
app_config: InvokeAIAppConfig,
record_store: ModelRecordServiceBase,
download_queue: DownloadQueueServiceBase,
metadata_store: ModelMetadataStore,
event_bus: Optional["EventServiceBase"] = None,
):
"""
@ -254,14 +157,12 @@ class ModelInstallServiceBase(ABC):
:param event_bus: InvokeAI event bus for reporting events to.
"""

# make the invoker optional here because we don't need it and it
# makes the installer harder to use outside the web app
@abstractmethod
def start(self, invoker: Optional[Invoker] = None) -> None:
"""Start the installer service."""
def start(self, invoker: Invoker) -> None:
"""Call at InvokeAI startup time."""
self.sync_to_config()

@abstractmethod
def stop(self, invoker: Optional[Invoker] = None) -> None:
def stop(self) -> None:
"""Stop the model install service. After this the objection can be safely deleted."""

@property
@ -364,13 +265,9 @@ class ModelInstallServiceBase(ABC):
"""

@abstractmethod
def get_job_by_source(self, source: ModelSource) -> List[ModelInstallJob]:
def get_job(self, source: ModelSource) -> List[ModelInstallJob]:
"""Return the ModelInstallJob(s) corresponding to the provided source."""

@abstractmethod
def get_job_by_id(self, id: int) -> ModelInstallJob:
"""Return the ModelInstallJob corresponding to the provided id. Raises ValueError if no job has that ID."""

@abstractmethod
def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
"""
@ -382,19 +279,16 @@ class ModelInstallServiceBase(ABC):
"""Prune all completed and errored jobs."""

@abstractmethod
def cancel_job(self, job: ModelInstallJob) -> None:
"""Cancel the indicated job."""

@abstractmethod
def wait_for_installs(self, timeout: int = 0) -> List[ModelInstallJob]:
def wait_for_installs(self) -> List[ModelInstallJob]:
"""
Wait for all pending installs to complete.

This will block until all pending installs have
completed, been cancelled, or errored out.
completed, been cancelled, or errored out. It will
block indefinitely if one or more jobs are in the
paused state.

:param timeout: Wait up to indicated number of seconds. Raise an Exception('timeout') if
installs do not complete within the indicated time.
It will return the current list of jobs.
"""

@abstractmethod
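The discriminated ModelSource union above is what callers hand to import_model(). A small sketch under the assumption that installer is a configured ModelInstallServiceBase implementation; note that the Civitai source and the wait_for_installs timeout exist on only one side of this diff, and the path and repo_id values below are illustrative:

# Illustrative only; field names follow the Pydantic models above.
local = LocalModelSource(path="/tmp/model.safetensors")  # hypothetical local file
hf = HFModelSource(repo_id="runwayml/stable-diffusion-v1-5", subfolder=Path("vae"))

job = installer.import_model(local, config={"description": "local checkpoint"})
installer.wait_for_installs(timeout=600)  # timeout parameter present on one side of this diff
assert job.complete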
@ -1,72 +1,60 @@
"""Model installation class."""

import os
import re
import threading
import time
from hashlib import sha256
from logging import Logger
from pathlib import Path
from queue import Empty, Queue
from queue import Queue
from random import randbytes
from shutil import copyfile, copytree, move, rmtree
from tempfile import mkdtemp
from typing import Any, Dict, List, Optional, Set, Union

from huggingface_hub import HfFolder
from pydantic.networks import AnyHttpUrl
from requests import Session

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.invoker import Invoker
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase, ModelRecordServiceSQL
from invokeai.app.services.events import EventServiceBase
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase, UnknownModelException
from invokeai.backend.model_manager.config import (
AnyModelConfig,
BaseModelType,
InvalidModelConfigException,
ModelRepoVariant,
ModelType,
)
from invokeai.backend.model_manager.hash import FastModelHash
from invokeai.backend.model_manager.metadata import (
AnyModelRepoMetadata,
CivitaiMetadataFetch,
HuggingFaceMetadataFetch,
ModelMetadataStore,
ModelMetadataWithFiles,
RemoteModelFile,
)
from invokeai.backend.model_manager.probe import ModelProbe
from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.util import Chdir, InvokeAILogger
from invokeai.backend.util.devices import choose_precision, choose_torch_device

from .model_install_base import (
CivitaiModelSource,
HFModelSource,
InstallStatus,
LocalModelSource,
ModelInstallJob,
ModelInstallServiceBase,
ModelSource,
URLModelSource,
)

TMPDIR_PREFIX = "tmpinstall_"
# marker that the queue is done and that thread should exit
STOP_JOB = ModelInstallJob(
source=LocalModelSource(path="stop"),
local_path=Path("/dev/null"),
)

class ModelInstallService(ModelInstallServiceBase):
"""class for InvokeAI model installation."""

_app_config: InvokeAIAppConfig
_record_store: ModelRecordServiceBase
_event_bus: Optional[EventServiceBase] = None
_install_queue: Queue[ModelInstallJob]
_install_jobs: List[ModelInstallJob]
_logger: Logger
_cached_model_paths: Set[Path]
_models_installed: Set[str]

def __init__(
self,
app_config: InvokeAIAppConfig,
record_store: ModelRecordServiceBase,
download_queue: DownloadQueueServiceBase,
metadata_store: Optional[ModelMetadataStore] = None,
event_bus: Optional[EventServiceBase] = None,
session: Optional[Session] = None,
):
"""
Initialize the installer object.
@ -79,26 +67,11 @@ class ModelInstallService(ModelInstallServiceBase):
self._record_store = record_store
self._event_bus = event_bus
self._logger = InvokeAILogger.get_logger(name=self.__class__.__name__)
self._install_jobs: List[ModelInstallJob] = []
self._install_queue: Queue[ModelInstallJob] = Queue()
self._cached_model_paths: Set[Path] = set()
self._models_installed: Set[str] = set()
self._lock = threading.Lock()
self._stop_event = threading.Event()
self._downloads_changed_event = threading.Event()
self._download_queue = download_queue
self._download_cache: Dict[AnyHttpUrl, ModelInstallJob] = {}
self._running = False
self._session = session
self._next_job_id = 0
# There may not necessarily be a metadata store initialized
# so we create one and initialize it with the same sql database
# used by the record store service.
if metadata_store:
self._metadata_store = metadata_store
else:
assert isinstance(record_store, ModelRecordServiceSQL)
self._metadata_store = ModelMetadataStore(record_store.db)
self._install_jobs = []
self._install_queue = Queue()
self._cached_model_paths = set()
self._models_installed = set()
self._start_installer_thread()

@property
def app_config(self) -> InvokeAIAppConfig: # noqa D102
@ -112,31 +85,64 @@ class ModelInstallService(ModelInstallServiceBase):
def event_bus(self) -> Optional[EventServiceBase]: # noqa D102
return self._event_bus

# make the invoker optional here because we don't need it and it
# makes the installer harder to use outside the web app
def start(self, invoker: Optional[Invoker] = None) -> None:
"""Start the installer thread."""
with self._lock:
if self._running:
raise Exception("Attempt to start the installer service twice")
self._start_installer_thread()
self._remove_dangling_install_dirs()
self.sync_to_config()
def stop(self, *args, **kwargs) -> None:
"""Stop the install thread; after this the object can be deleted and garbage collected."""
self._install_queue.put(STOP_JOB)

def stop(self, invoker: Optional[Invoker] = None) -> None:
"""Stop the installer thread; after this the object can be deleted and garbage collected."""
with self._lock:
if not self._running:
raise Exception("Attempt to stop the install service before it was started")
self._stop_event.set()
with self._install_queue.mutex:
self._install_queue.queue.clear() # get rid of pending jobs
active_jobs = [x for x in self.list_jobs() if x.running]
if active_jobs:
self._logger.warning("Waiting for active install job to complete")
self.wait_for_installs()
self._download_cache.clear()
self._running = False
def _start_installer_thread(self) -> None:
threading.Thread(target=self._install_next_item, daemon=True).start()

def _install_next_item(self) -> None:
done = False
while not done:
job = self._install_queue.get()
if job == STOP_JOB:
done = True
continue

assert job.local_path is not None
try:
self._signal_job_running(job)
if job.inplace:
key = self.register_path(job.local_path, job.config_in)
else:
key = self.install_path(job.local_path, job.config_in)
job.config_out = self.record_store.get_model(key)
self._signal_job_completed(job)

except (OSError, DuplicateModelException, InvalidModelConfigException) as excp:
self._signal_job_errored(job, excp)
finally:
self._install_queue.task_done()
self._logger.info("Install thread exiting")

def _signal_job_running(self, job: ModelInstallJob) -> None:
job.status = InstallStatus.RUNNING
self._logger.info(f"{job.source}: model installation started")
if self._event_bus:
self._event_bus.emit_model_install_started(str(job.source))

def _signal_job_completed(self, job: ModelInstallJob) -> None:
job.status = InstallStatus.COMPLETED
assert job.config_out
self._logger.info(
f"{job.source}: model installation completed. {job.local_path} registered key {job.config_out.key}"
)
if self._event_bus:
assert job.local_path is not None
assert job.config_out is not None
key = job.config_out.key
self._event_bus.emit_model_install_completed(str(job.source), key)

def _signal_job_errored(self, job: ModelInstallJob, excp: Exception) -> None:
job.set_error(excp)
self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}")
if self._event_bus:
error_type = job.error_type
error = job.error
assert error_type is not None
assert error is not None
self._event_bus.emit_model_install_error(str(job.source), error_type, error)

def register_path(
self,
@ -162,12 +168,7 @@ class ModelInstallService(ModelInstallServiceBase):
info: AnyModelConfig = self._probe_model(Path(model_path), config)
old_hash = info.original_hash
dest_path = self.app_config.models_path / info.base.value / info.type.value / model_path.name
try:
new_path = self._copy_model(model_path, dest_path)
except FileExistsError as excp:
raise DuplicateModelException(
f"A model named {model_path.name} is already installed at {dest_path.as_posix()}"
) from excp
new_path = self._copy_model(model_path, dest_path)
new_hash = FastModelHash.hash(new_path)
assert new_hash == old_hash, f"{model_path}: Model hash changed during installation, possibly corrupted."

@ -177,56 +178,43 @@ class ModelInstallService(ModelInstallServiceBase):
info,
)

def import_model(self, source: ModelSource, config: Optional[Dict[str, Any]] = None) -> ModelInstallJob: # noqa D102
if isinstance(source, LocalModelSource):
install_job = self._import_local_model(source, config)
self._install_queue.put(install_job) # synchronously install
elif isinstance(source, CivitaiModelSource):
install_job = self._import_from_civitai(source, config)
elif isinstance(source, HFModelSource):
install_job = self._import_from_hf(source, config)
elif isinstance(source, URLModelSource):
install_job = self._import_from_url(source, config)
else:
raise ValueError(f"Unsupported model source: '{type(source)}'")
def import_model(
self,
source: ModelSource,
config: Optional[Dict[str, Any]] = None,
) -> ModelInstallJob: # noqa D102
if not config:
config = {}

self._install_jobs.append(install_job)
return install_job
# Installing a local path
if isinstance(source, LocalModelSource) and Path(source.path).exists(): # a path that is already on disk
job = ModelInstallJob(
source=source,
config_in=config,
local_path=Path(source.path),
)
self._install_jobs.append(job)
self._install_queue.put(job)
return job

else: # here is where we'd download a URL or repo_id. Implementation pending download queue.
raise UnknownModelException("File or directory not found")

def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
return self._install_jobs

def get_job_by_source(self, source: ModelSource) -> List[ModelInstallJob]: # noqa D102
def get_job(self, source: ModelSource) -> List[ModelInstallJob]: # noqa D102
return [x for x in self._install_jobs if x.source == source]

def get_job_by_id(self, id: int) -> ModelInstallJob: # noqa D102
jobs = [x for x in self._install_jobs if x.id == id]
if not jobs:
raise ValueError(f"No job with id {id} known")
assert len(jobs) == 1
assert isinstance(jobs[0], ModelInstallJob)
return jobs[0]

def wait_for_installs(self, timeout: int = 0) -> List[ModelInstallJob]: # noqa D102
"""Block until all installation jobs are done."""
start = time.time()
while len(self._download_cache) > 0:
if self._downloads_changed_event.wait(timeout=5): # in case we miss an event
self._downloads_changed_event.clear()
if timeout > 0 and time.time() - start > timeout:
raise Exception("Timeout exceeded")
def wait_for_installs(self) -> List[ModelInstallJob]: # noqa D102
self._install_queue.join()
return self._install_jobs

def cancel_job(self, job: ModelInstallJob) -> None:
"""Cancel the indicated job."""
job.cancel()
with self._lock:
self._cancel_download_parts(job)

def prune_jobs(self) -> None:
"""Prune all completed and errored jobs."""
unfinished_jobs = [x for x in self._install_jobs if not x.in_terminal_state]
unfinished_jobs = [
x for x in self._install_jobs if x.status not in [InstallStatus.COMPLETED, InstallStatus.ERROR]
]
self._install_jobs = unfinished_jobs

def sync_to_config(self) -> None:
@ -242,108 +230,10 @@ class ModelInstallService(ModelInstallServiceBase):
self._cached_model_paths = {Path(x.path) for x in self.record_store.all_models()}
callback = self._scan_install if install else self._scan_register
search = ModelSearch(on_model_found=callback)
self._models_installed.clear()
self._models_installed: Set[str] = set()
search.search(scan_dir)
return list(self._models_installed)

def unregister(self, key: str) -> None: # noqa D102
self.record_store.del_model(key)

def delete(self, key: str) -> None: # noqa D102
"""Unregister the model. Delete its files only if they are within our models directory."""
model = self.record_store.get_model(key)
models_dir = self.app_config.models_path
model_path = models_dir / model.path
if model_path.is_relative_to(models_dir):
self.unconditionally_delete(key)
else:
self.unregister(key)

def unconditionally_delete(self, key: str) -> None: # noqa D102
model = self.record_store.get_model(key)
path = self.app_config.models_path / model.path
if path.is_dir():
rmtree(path)
else:
path.unlink()
self.unregister(key)

# --------------------------------------------------------------------------------------------
# Internal functions that manage the installer threads
# --------------------------------------------------------------------------------------------
def _start_installer_thread(self) -> None:
threading.Thread(target=self._install_next_item, daemon=True).start()
self._running = True

def _install_next_item(self) -> None:
done = False
while not done:
if self._stop_event.is_set():
done = True
continue
try:
job = self._install_queue.get(timeout=1)
except Empty:
continue

assert job.local_path is not None
try:
if job.cancelled:
self._signal_job_cancelled(job)

elif job.errored:
self._signal_job_errored(job)

elif (
job.waiting or job.downloading
): # local jobs will be in waiting state, remote jobs will be downloading state
job.total_bytes = self._stat_size(job.local_path)
job.bytes = job.total_bytes
self._signal_job_running(job)
if job.inplace:
key = self.register_path(job.local_path, job.config_in)
else:
key = self.install_path(job.local_path, job.config_in)
job.config_out = self.record_store.get_model(key)

# enter the metadata, if there is any
if job.source_metadata:
self._metadata_store.add_metadata(key, job.source_metadata)
self._signal_job_completed(job)

except InvalidModelConfigException as excp:
if any(x.content_type is not None and "text/html" in x.content_type for x in job.download_parts):
job.set_error(
InvalidModelConfigException(
f"At least one file in {job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
)
)
else:
job.set_error(excp)
self._signal_job_errored(job)

except (OSError, DuplicateModelException) as excp:
job.set_error(excp)
self._signal_job_errored(job)

finally:
# if this is an install of a remote file, then clean up the temporary directory
if job._install_tmpdir is not None:
rmtree(job._install_tmpdir)
self._install_queue.task_done()

self._logger.info("Install thread exiting")

# --------------------------------------------------------------------------------------------
# Internal functions that manage the models directory
# --------------------------------------------------------------------------------------------
def _remove_dangling_install_dirs(self) -> None:
"""Remove leftover tmpdirs from aborted installs."""
path = self._app_config.models_path
for tmpdir in path.glob(f"{TMPDIR_PREFIX}*"):
self._logger.info(f"Removing dangling temporary directory {tmpdir}")
rmtree(tmpdir)

def _scan_models_directory(self) -> None:
"""
Scan the models directory for new and missing models.
@ -426,6 +316,28 @@ class ModelInstallService(ModelInstallServiceBase):
pass
return True

def unregister(self, key: str) -> None: # noqa D102
self.record_store.del_model(key)

def delete(self, key: str) -> None: # noqa D102
"""Unregister the model. Delete its files only if they are within our models directory."""
model = self.record_store.get_model(key)
models_dir = self.app_config.models_path
model_path = models_dir / model.path
if model_path.is_relative_to(models_dir):
self.unconditionally_delete(key)
else:
self.unregister(key)

def unconditionally_delete(self, key: str) -> None: # noqa D102
model = self.record_store.get_model(key)
path = self.app_config.models_path / model.path
if path.is_dir():
rmtree(path)
else:
path.unlink()
self.unregister(key)

def _copy_model(self, old_path: Path, new_path: Path) -> Path:
if old_path == new_path:
return old_path
@ -481,279 +393,3 @@ class ModelInstallService(ModelInstallServiceBase):
info.config = legacy_conf.relative_to(self.app_config.root_dir).as_posix()
self.record_store.add_model(key, info)
return key

def _next_id(self) -> int:
with self._lock:
id = self._next_job_id
self._next_job_id += 1
return id

@staticmethod
def _guess_variant() -> ModelRepoVariant:
"""Guess the best HuggingFace variant type to download."""
precision = choose_precision(choose_torch_device())
return ModelRepoVariant.FP16 if precision == "float16" else ModelRepoVariant.DEFAULT

def _import_local_model(self, source: LocalModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
return ModelInstallJob(
id=self._next_id(),
source=source,
config_in=config or {},
local_path=Path(source.path),
inplace=source.inplace,
)

def _import_from_civitai(self, source: CivitaiModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
if not source.access_token:
self._logger.info("No Civitai access token provided; some models may not be downloadable.")
metadata = CivitaiMetadataFetch(self._session).from_id(str(source.version_id))
assert isinstance(metadata, ModelMetadataWithFiles)
remote_files = metadata.download_urls(session=self._session)
return self._import_remote_model(source=source, config=config, metadata=metadata, remote_files=remote_files)

def _import_from_hf(self, source: HFModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
# Add user's cached access token to HuggingFace requests
source.access_token = source.access_token or HfFolder.get_token()
if not source.access_token:
self._logger.info("No HuggingFace access token present; some models may not be downloadable.")

metadata = HuggingFaceMetadataFetch(self._session).from_id(source.repo_id)
assert isinstance(metadata, ModelMetadataWithFiles)
remote_files = metadata.download_urls(
variant=source.variant or self._guess_variant(),
subfolder=source.subfolder,
session=self._session,
)

return self._import_remote_model(
source=source,
config=config,
remote_files=remote_files,
metadata=metadata,
)

def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
# URLs from Civitai or HuggingFace will be handled specially
url_patterns = {
r"https?://civitai.com/": CivitaiMetadataFetch,
r"https?://huggingface.co/": HuggingFaceMetadataFetch,
}
metadata = None
for pattern, fetcher in url_patterns.items():
if re.match(pattern, str(source.url), re.IGNORECASE):
metadata = fetcher(self._session).from_url(source.url)
break
if metadata and isinstance(metadata, ModelMetadataWithFiles):
remote_files = metadata.download_urls(session=self._session)
else:
remote_files = [RemoteModelFile(url=source.url, path=Path("."), size=0)]

return self._import_remote_model(
source=source,
config=config,
metadata=metadata,
remote_files=remote_files,
)

def _import_remote_model(
self,
source: ModelSource,
remote_files: List[RemoteModelFile],
metadata: Optional[AnyModelRepoMetadata],
config: Optional[Dict[str, Any]],
) -> ModelInstallJob:
# TODO: Replace with tempfile.tmpdir() when multithreading is cleaned up.
# Currently the tmpdir isn't automatically removed at exit because it is
# being held in a daemon thread.
tmpdir = Path(
mkdtemp(
dir=self._app_config.models_path,
prefix=TMPDIR_PREFIX,
)
)
install_job = ModelInstallJob(
id=self._next_id(),
source=source,
config_in=config or {},
source_metadata=metadata,
local_path=tmpdir, # local path may change once the download has started due to content-disposition handling
bytes=0,
total_bytes=0,
)
# we remember the path up to the top of the tmpdir so that it may be
# removed safely at the end of the install process.
install_job._install_tmpdir = tmpdir
assert install_job.total_bytes is not None # to avoid type checking complaints in the loop below

self._logger.info(f"Queuing {source} for downloading")
for model_file in remote_files:
url = model_file.url
path = model_file.path
self._logger.info(f"Downloading {url} => {path}")
install_job.total_bytes += model_file.size
assert hasattr(source, "access_token")
dest = tmpdir / path.parent
dest.mkdir(parents=True, exist_ok=True)
download_job = DownloadJob(
source=url,
dest=dest,
access_token=source.access_token,
)
self._download_cache[download_job.source] = install_job # matches a download job to an install job
install_job.download_parts.add(download_job)

self._download_queue.submit_download_job(
download_job,
on_start=self._download_started_callback,
on_progress=self._download_progress_callback,
on_complete=self._download_complete_callback,
on_error=self._download_error_callback,
on_cancelled=self._download_cancelled_callback,
)
return install_job

def _stat_size(self, path: Path) -> int:
size = 0
if path.is_file():
size = path.stat().st_size
elif path.is_dir():
for root, _, files in os.walk(path):
size += sum(self._stat_size(Path(root, x)) for x in files)
return size

# ------------------------------------------------------------------
# Callbacks are executed by the download queue in a separate thread
# ------------------------------------------------------------------
def _download_started_callback(self, download_job: DownloadJob) -> None:
self._logger.info(f"{download_job.source}: model download started")
with self._lock:
install_job = self._download_cache[download_job.source]
install_job.status = InstallStatus.DOWNLOADING

assert download_job.download_path
if install_job.local_path == install_job._install_tmpdir:
partial_path = download_job.download_path.relative_to(install_job._install_tmpdir)
dest_name = partial_path.parts[0]
install_job.local_path = install_job._install_tmpdir / dest_name

# Update the total bytes count for remote sources.
if not install_job.total_bytes:
install_job.total_bytes = sum(x.total_bytes for x in install_job.download_parts)

def _download_progress_callback(self, download_job: DownloadJob) -> None:
with self._lock:
install_job = self._download_cache[download_job.source]
if install_job.cancelled: # This catches the case in which the caller directly calls job.cancel()
self._cancel_download_parts(install_job)
else:
# update sizes
install_job.bytes = sum(x.bytes for x in install_job.download_parts)
self._signal_job_downloading(install_job)

def _download_complete_callback(self, download_job: DownloadJob) -> None:
with self._lock:
install_job = self._download_cache[download_job.source]
self._download_cache.pop(download_job.source, None)

# are there any more active jobs left in this task?
if all(x.complete for x in install_job.download_parts):
# now enqueue job for actual installation into the models directory
self._install_queue.put(install_job)

# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()

def _download_error_callback(self, download_job: DownloadJob, excp: Optional[Exception] = None) -> None:
with self._lock:
install_job = self._download_cache.pop(download_job.source, None)
assert install_job is not None
assert excp is not None
install_job.set_error(excp)
self._logger.error(
f"Cancelling {install_job.source} due to an error while downloading {download_job.source}: {str(excp)}"
)
self._cancel_download_parts(install_job)

# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()

def _download_cancelled_callback(self, download_job: DownloadJob) -> None:
with self._lock:
install_job = self._download_cache.pop(download_job.source, None)
if not install_job:
return
self._downloads_changed_event.set()
self._logger.warning(f"Download {download_job.source} cancelled.")
# if install job has already registered an error, then do not replace its status with cancelled
if not install_job.errored:
install_job.cancel()
self._cancel_download_parts(install_job)

# Let other threads know that the number of downloads has changed
self._downloads_changed_event.set()

def _cancel_download_parts(self, install_job: ModelInstallJob) -> None:
# on multipart downloads, _cancel_components() will get called repeatedly from the download callbacks
# do not lock here because it gets called within a locked context
for s in install_job.download_parts:
self._download_queue.cancel_job(s)

if all(x.in_terminal_state for x in install_job.download_parts):
# When all parts have reached their terminal state, we finalize the job to clean up the temporary directory and other resources
self._install_queue.put(install_job)

# ------------------------------------------------------------------------------------------------
# Internal methods that put events on the event bus
# ------------------------------------------------------------------------------------------------
def _signal_job_running(self, job: ModelInstallJob) -> None:
job.status = InstallStatus.RUNNING
self._logger.info(f"{job.source}: model installation started")
if self._event_bus:
self._event_bus.emit_model_install_running(str(job.source))

def _signal_job_downloading(self, job: ModelInstallJob) -> None:
if self._event_bus:
parts: List[Dict[str, str | int]] = [
{
"url": str(x.source),
"local_path": str(x.download_path),
"bytes": x.bytes,
"total_bytes": x.total_bytes,
}
for x in job.download_parts
]
assert job.bytes is not None
assert job.total_bytes is not None
self._event_bus.emit_model_install_downloading(
str(job.source),
local_path=job.local_path.as_posix(),
parts=parts,
bytes=job.bytes,
total_bytes=job.total_bytes,
)

def _signal_job_completed(self, job: ModelInstallJob) -> None:
job.status = InstallStatus.COMPLETED
assert job.config_out
self._logger.info(
f"{job.source}: model installation completed. {job.local_path} registered key {job.config_out.key}"
)
if self._event_bus:
assert job.local_path is not None
assert job.config_out is not None
key = job.config_out.key
self._event_bus.emit_model_install_completed(str(job.source), key)

def _signal_job_errored(self, job: ModelInstallJob) -> None:
self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}\n{job.error}")
if self._event_bus:
error_type = job.error_type
error = job.error
assert error_type is not None
assert error is not None
self._event_bus.emit_model_install_error(str(job.source), error_type, error)

def _signal_job_cancelled(self, job: ModelInstallJob) -> None:
self._logger.info(f"{job.source}: model installation was cancelled")
if self._event_bus:
self._event_bus.emit_model_install_cancelled(str(job.source))
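The callbacks above hand finished downloads back to the install queue; from the caller's side the whole flow is observable through the job object. A rough sketch, assuming installer is a started ModelInstallService and the URL is a placeholder:

import time

source = URLModelSource(url="https://example.com/checkpoints/model.safetensors")  # placeholder URL
job = installer.import_model(source)

while not job.in_terminal_state:
    if job.downloading:
        print(f"{job.bytes or 0}/{job.total_bytes} bytes")
    time.sleep(1)

if job.errored:
    print(job.error_type, job.error)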
@ -4,8 +4,6 @@ from .model_records_base import ( # noqa F401
InvalidModelException,
ModelRecordServiceBase,
UnknownModelException,
ModelSummary,
ModelRecordOrderBy,
)
from .model_records_sql import ModelRecordServiceSQL # noqa F401

@ -15,6 +13,4 @@ __all__ = [
"DuplicateModelException",
"InvalidModelException",
"UnknownModelException",
"ModelSummary",
"ModelRecordOrderBy",
]
@ -4,15 +4,10 @@ Abstract base class for storing and retrieving model configuration records.
"""

from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from typing import List, Optional, Union

from pydantic import BaseModel, Field

from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType

class DuplicateModelException(Exception):
@ -31,33 +26,11 @@ class ConfigFileVersionMismatchException(Exception):
"""Raised on an attempt to open a config with an incompatible version."""

class ModelRecordOrderBy(str, Enum):
"""The order in which to return model summaries."""

Default = "default" # order by type, base, format and name
Type = "type"
Base = "base"
Name = "name"
Format = "format"

class ModelSummary(BaseModel):
"""A short summary of models for UI listing purposes."""

key: str = Field(description="model key")
type: ModelType = Field(description="model type")
base: BaseModelType = Field(description="base model")
format: ModelFormat = Field(description="model format")
name: str = Field(description="model name")
description: str = Field(description="short description of model")
tags: Set[str] = Field(description="tags associated with model")

class ModelRecordServiceBase(ABC):
"""Abstract base class for storage and retrieval of model configs."""

@abstractmethod
def add_model(self, key: str, config: Union[Dict[str, Any], AnyModelConfig]) -> AnyModelConfig:
def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
"""
Add a model to the database.

@ -81,7 +54,7 @@ class ModelRecordServiceBase(ABC):
pass

@abstractmethod
def update_model(self, key: str, config: Union[Dict[str, Any], AnyModelConfig]) -> AnyModelConfig:
def update_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
"""
Update the model, returning the updated version.

@ -102,47 +75,6 @@ class ModelRecordServiceBase(ABC):
"""
pass

@property
@abstractmethod
def metadata_store(self) -> ModelMetadataStore:
"""Return a ModelMetadataStore initialized on the same database."""
pass

@abstractmethod
def get_metadata(self, key: str) -> Optional[AnyModelRepoMetadata]:
"""
Retrieve metadata (if any) from when model was downloaded from a repo.

:param key: Model key
"""
pass

@abstractmethod
def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]:
"""List metadata for all models that have it."""
pass

@abstractmethod
def search_by_metadata_tag(self, tags: Set[str]) -> List[AnyModelConfig]:
"""
Search model metadata for ones with all listed tags and return their corresponding configs.

:param tags: Set of tags to search for. All tags must be present.
"""
pass

@abstractmethod
def list_tags(self) -> Set[str]:
"""Return a unique set of all the model tags in the metadata database."""
pass

@abstractmethod
def list_models(
self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
) -> PaginatedResults[ModelSummary]:
"""Return a paginated summary listing of each model in the database."""
pass

@abstractmethod
def exists(self, key: str) -> bool:
"""
@ -174,7 +106,6 @@ class ModelRecordServiceBase(ABC):
model_name: Optional[str] = None,
base_model: Optional[BaseModelType] = None,
model_type: Optional[ModelType] = None,
model_format: Optional[ModelFormat] = None,
) -> List[AnyModelConfig]:
"""
Return models matching name, base and/or type.
@ -182,7 +113,6 @@ class ModelRecordServiceBase(ABC):
:param model_name: Filter by name of model (optional)
:param base_model: Filter by base model (optional)
:param model_type: Filter by type of model (optional)
:param model_format: Filter by model format (e.g. "diffusers") (optional)

If none of the optional filters are passed, will return all
models in the database.
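The record-service ABC above gains pagination and metadata hooks on one side of this diff. A short sketch of the listing call, assuming store is a concrete ModelRecordServiceBase implementation such as the SQL-backed one in the next file:

# Illustrative only; parameter names match the abstract signatures above.
page = store.list_models(page=0, per_page=25, order_by=ModelRecordOrderBy.Name)
for summary in page.items:  # PaginatedResults is assumed to expose .items
    print(summary.key, summary.type.value, summary.base.value, summary.name)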
@ -42,26 +42,20 @@ Typical usage:

import json
import sqlite3
from math import ceil
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from typing import List, Optional, Union

from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,
    ModelConfigFactory,
    ModelFormat,
    ModelType,
)
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore, UnknownMetadataException

from ..shared.sqlite.sqlite_database import SqliteDatabase
from .model_records_base import (
    DuplicateModelException,
    ModelRecordOrderBy,
    ModelRecordServiceBase,
    ModelSummary,
    UnknownModelException,
)

@ -69,6 +63,9 @@ from .model_records_base import (
class ModelRecordServiceSQL(ModelRecordServiceBase):
    """Implementation of the ModelConfigStore ABC using a SQL database."""

    _db: SqliteDatabase
    _cursor: sqlite3.Cursor

    def __init__(self, db: SqliteDatabase):
        """
        Initialize a new object from preexisting sqlite3 connection and threading lock objects.
@ -80,12 +77,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        self._db = db
        self._cursor = self._db.conn.cursor()

    @property
    def db(self) -> SqliteDatabase:
        """Return the underlying database."""
        return self._db

    def add_model(self, key: str, config: Union[Dict[str, Any], AnyModelConfig]) -> AnyModelConfig:
    def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
        """
        Add a model to the database.

@ -233,7 +225,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        model_name: Optional[str] = None,
        base_model: Optional[BaseModelType] = None,
        model_type: Optional[ModelType] = None,
        model_format: Optional[ModelFormat] = None,
    ) -> List[AnyModelConfig]:
        """
        Return models matching name, base and/or type.
@ -241,7 +232,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        :param model_name: Filter by name of model (optional)
        :param base_model: Filter by base model (optional)
        :param model_type: Filter by type of model (optional)
        :param model_format: Filter by model format (e.g. "diffusers") (optional)

        If none of the optional filters are passed, will return all
        models in the database.
@ -258,9 +248,6 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
        if model_type:
            where_clause.append("type=?")
            bindings.append(model_type)
        if model_format:
            where_clause.append("format=?")
            bindings.append(model_format)
        where = f"WHERE {' AND '.join(where_clause)}" if where_clause else ""
        with self._db.lock:
            self._cursor.execute(
@ -300,95 +287,3 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
            )
            results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
        return results

    @property
    def metadata_store(self) -> ModelMetadataStore:
        """Return a ModelMetadataStore initialized on the same database."""
        return ModelMetadataStore(self._db)

    def get_metadata(self, key: str) -> Optional[AnyModelRepoMetadata]:
        """
        Retrieve metadata (if any) from when model was downloaded from a repo.

        :param key: Model key
        """
        store = self.metadata_store
        try:
            metadata = store.get_metadata(key)
            return metadata
        except UnknownMetadataException:
            return None

    def search_by_metadata_tag(self, tags: Set[str]) -> List[AnyModelConfig]:
        """
        Search model metadata for ones with all listed tags and return their corresponding configs.

        :param tags: Set of tags to search for. All tags must be present.
        """
        store = ModelMetadataStore(self._db)
        keys = store.search_by_tag(tags)
        return [self.get_model(x) for x in keys]

    def list_tags(self) -> Set[str]:
        """Return a unique set of all the model tags in the metadata database."""
        store = ModelMetadataStore(self._db)
        return store.list_tags()

    def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]:
        """List metadata for all models that have it."""
        store = ModelMetadataStore(self._db)
        return store.list_all_metadata()

    def list_models(
        self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
    ) -> PaginatedResults[ModelSummary]:
        """Return a paginated summary listing of each model in the database."""
        ordering = {
            ModelRecordOrderBy.Default: "a.type, a.base, a.format, a.name",
            ModelRecordOrderBy.Type: "a.type",
            ModelRecordOrderBy.Base: "a.base",
            ModelRecordOrderBy.Name: "a.name",
            ModelRecordOrderBy.Format: "a.format",
        }

        def _fixup(summary: Dict[str, str]) -> Dict[str, Union[str, int, Set[str]]]:
            """Fix up results so that there are no null values."""
            result: Dict[str, Union[str, int, Set[str]]] = {}
            for key, item in summary.items():
                result[key] = item or ""
            result["tags"] = set(json.loads(summary["tags"] or "[]"))
            return result

        # Lock so that the database isn't updated while we're doing the two queries.
        with self._db.lock:
            # query1: get the total number of model configs
            self._cursor.execute(
                """--sql
                select count(*) from model_config;
                """,
                (),
            )
            total = int(self._cursor.fetchone()[0])

            # query2: fetch key fields from the join of model_config and model_metadata
            self._cursor.execute(
                f"""--sql
                SELECT a.id as key, a.type, a.base, a.format, a.name,
                json_extract(a.config, '$.description') as description,
                json_extract(b.metadata, '$.tags') as tags
                FROM model_config AS a
                LEFT JOIN model_metadata AS b on a.id=b.id
                ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
                LIMIT ?
                OFFSET ?;
                """,
                (
                    per_page,
                    page * per_page,
                ),
            )
            rows = self._cursor.fetchall()
            items = [ModelSummary.model_validate(_fixup(dict(x))) for x in rows]
            return PaginatedResults(
                page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items
            )
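For reference, a standalone sketch of the pagination arithmetic used by `list_models()` above, assuming a plain `sqlite3` connection to a database with the `model_config` / `model_metadata` schema from this PR. It mirrors the two queries: a `COUNT(*)` to derive the page count, then a `LIMIT`/`OFFSET` page of rows.

```python
import sqlite3
from math import ceil


def page_of_models(conn: sqlite3.Connection, page: int = 0, per_page: int = 10):
    cursor = conn.cursor()
    # query 1: total row count, used to derive the number of pages
    cursor.execute("SELECT count(*) FROM model_config;")
    total = int(cursor.fetchone()[0])
    pages = ceil(total / per_page)  # e.g. 23 models at 10 per page -> 3 pages

    # query 2: one page of summaries; OFFSET is page * per_page
    cursor.execute(
        """
        SELECT a.id, a.type, a.base, a.format, a.name,
               json_extract(a.config, '$.description'),
               json_extract(b.metadata, '$.tags')
        FROM model_config AS a
        LEFT JOIN model_metadata AS b ON a.id = b.id
        ORDER BY a.name
        LIMIT ? OFFSET ?;
        """,
        (per_page, page * per_page),
    )
    return pages, cursor.fetchall()
```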
@ -5,8 +5,6 @@ from invokeai.app.services.image_files.image_files_base import ImageFileStorageB
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import build_migration_1
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator


@ -29,8 +27,6 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
    migrator = SqliteMigrator(db=db)
    migrator.register_migration(build_migration_1())
    migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
    migrator.register_migration(build_migration_3(app_config=config, logger=logger))
    migrator.register_migration(build_migration_4())
    migrator.run_migrations()

    return db
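A hedged sketch of how a future migration would slot into `init_db()`, following the `build_migration_N()` pattern registered above. `Migration5Callback` and `build_migration_5` are placeholders and do not exist in this PR; the only assumed API is the `Migration(from_version=..., to_version=..., callback=...)` constructor and the `register_migration` / `run_migrations` calls already shown.

```python
import sqlite3

from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration


class Migration5Callback:
    def __call__(self, cursor: sqlite3.Cursor) -> None:
        # Each callback receives a cursor and performs idempotent DDL/DML.
        cursor.execute("CREATE TABLE IF NOT EXISTS example_table (id TEXT PRIMARY KEY);")


def build_migration_5() -> Migration:
    return Migration(from_version=4, to_version=5, callback=Migration5Callback())


# In init_db(), it would then be registered before run_migrations():
#   migrator.register_migration(build_migration_5())
#   migrator.run_migrations()
```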
@ -1,15 +1,8 @@
import sqlite3
from logging import Logger

from pydantic import ValidationError
from tqdm import tqdm

from invokeai.app.services.image_files.image_files_base import ImageFileStorageBase
from invokeai.app.services.image_files.image_files_common import ImageFileNotFoundException
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
from invokeai.app.services.workflow_records.workflow_records_common import (
    UnsafeWorkflowWithVersionValidator,
)


class Migration2Callback:
@ -23,6 +16,7 @@ class Migration2Callback:
        self._drop_old_workflow_tables(cursor)
        self._add_workflow_library(cursor)
        self._drop_model_manager_metadata(cursor)
        self._recreate_model_config(cursor)
        self._migrate_embedded_workflows(cursor)

    def _add_images_has_workflow(self, cursor: sqlite3.Cursor) -> None:
@ -96,6 +90,40 @@ class Migration2Callback:
        """Drops the `model_manager_metadata` table."""
        cursor.execute("DROP TABLE IF EXISTS model_manager_metadata;")

    def _recreate_model_config(self, cursor: sqlite3.Cursor) -> None:
        """
        Drops the `model_config` table, recreating it.

        In 3.4.0, this table used explicit columns but was changed to use json_extract in 3.5.0.

        Because this table is not used in production, we are able to simply drop it and recreate it.
        """

        cursor.execute("DROP TABLE IF EXISTS model_config;")

        cursor.execute(
            """--sql
            CREATE TABLE IF NOT EXISTS model_config (
                id TEXT NOT NULL PRIMARY KEY,
                -- The next 3 fields are enums in python, unrestricted string here
                base TEXT GENERATED ALWAYS as (json_extract(config, '$.base')) VIRTUAL NOT NULL,
                type TEXT GENERATED ALWAYS as (json_extract(config, '$.type')) VIRTUAL NOT NULL,
                name TEXT GENERATED ALWAYS as (json_extract(config, '$.name')) VIRTUAL NOT NULL,
                path TEXT GENERATED ALWAYS as (json_extract(config, '$.path')) VIRTUAL NOT NULL,
                format TEXT GENERATED ALWAYS as (json_extract(config, '$.format')) VIRTUAL NOT NULL,
                original_hash TEXT, -- could be null
                -- Serialized JSON representation of the whole config object,
                -- which will contain additional fields from subclasses
                config TEXT NOT NULL,
                created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
                -- Updated via trigger
                updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
                -- unique constraint on combo of name, base and type
                UNIQUE(name, base, type)
            );
            """
        )

    def _migrate_embedded_workflows(self, cursor: sqlite3.Cursor) -> None:
        """
        In the v3.5.0 release, InvokeAI changed how it handles embedded workflows. The `images` table in
@ -104,7 +132,7 @@ class Migration2Callback:
        This migrate callback checks each image for the presence of an embedded workflow, then updates its entry
        in the database accordingly.
        """
        # Get all image names
        # Get the total number of images and chunk it into pages
        cursor.execute("SELECT image_name FROM images")
        image_names: list[str] = [image[0] for image in cursor.fetchall()]
        total_image_names = len(image_names)
@ -121,19 +149,11 @@ class Migration2Callback:
            pbar.set_description(f"Checking image {idx + 1}/{total_image_names} for workflow")
            try:
                pil_image = self._image_files.get(image_name)
            except ImageFileNotFoundException:
                self._logger.warning(f"Image {image_name} not found, skipping")
                continue
            except Exception as e:
                self._logger.warning(f"Error while checking image {image_name}, skipping: {e}")
                continue
            if "invokeai_workflow" in pil_image.info:
                try:
                    UnsafeWorkflowWithVersionValidator.validate_json(pil_image.info.get("invokeai_workflow", ""))
                except ValidationError:
                    self._logger.warning(f"Image {image_name} has invalid embedded workflow, skipping")
                    continue
                to_migrate.append((True, image_name))
                if "invokeai_workflow" in pil_image.info:
                    to_migrate.append((True, image_name))
            except KeyError:
                self._logger.warning(f"Image '{image_name}' not found. Skipping.")
                continue

        self._logger.info(f"Adding {len(to_migrate)} embedded workflows to database")
        cursor.executemany("UPDATE images SET has_workflow = ? WHERE image_name = ?", to_migrate)
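A minimal sketch of the embedded-workflow scan above, assuming direct access to the image files on disk rather than the `ImageFileStorageBase` service the migration uses. It relies only on details shown in the diff: the `invokeai_workflow` key in the PNG's `info` dict and the `UPDATE images SET has_workflow` statement.

```python
import sqlite3
from pathlib import Path

from PIL import Image


def mark_images_with_workflows(conn: sqlite3.Connection, images_dir: Path) -> None:
    cursor = conn.cursor()
    cursor.execute("SELECT image_name FROM images;")
    to_migrate: list[tuple[bool, str]] = []
    for (image_name,) in cursor.fetchall():
        path = images_dir / image_name
        if not path.exists():
            continue  # the real migration logs a warning and skips missing files
        with Image.open(path) as pil_image:
            # Embedded workflows live in the PNG metadata under this key.
            if "invokeai_workflow" in pil_image.info:
                to_migrate.append((True, image_name))
    cursor.executemany("UPDATE images SET has_workflow = ? WHERE image_name = ?", to_migrate)
    conn.commit()
```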
@ -1,79 +0,0 @@
import sqlite3
from logging import Logger

from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration

from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1


class Migration3Callback:
    def __init__(self, app_config: InvokeAIAppConfig, logger: Logger) -> None:
        self._app_config = app_config
        self._logger = logger

    def __call__(self, cursor: sqlite3.Cursor) -> None:
        self._drop_model_manager_metadata(cursor)
        self._recreate_model_config(cursor)
        self._migrate_model_config_records(cursor)

    def _drop_model_manager_metadata(self, cursor: sqlite3.Cursor) -> None:
        """Drops the `model_manager_metadata` table."""
        cursor.execute("DROP TABLE IF EXISTS model_manager_metadata;")

    def _recreate_model_config(self, cursor: sqlite3.Cursor) -> None:
        """
        Drops the `model_config` table, recreating it.

        In 3.4.0, this table used explicit columns but was changed to use json_extract in 3.5.0.

        Because this table is not used in production, we are able to simply drop it and recreate it.
        """

        cursor.execute("DROP TABLE IF EXISTS model_config;")

        cursor.execute(
            """--sql
            CREATE TABLE IF NOT EXISTS model_config (
                id TEXT NOT NULL PRIMARY KEY,
                -- The next 3 fields are enums in python, unrestricted string here
                base TEXT GENERATED ALWAYS as (json_extract(config, '$.base')) VIRTUAL NOT NULL,
                type TEXT GENERATED ALWAYS as (json_extract(config, '$.type')) VIRTUAL NOT NULL,
                name TEXT GENERATED ALWAYS as (json_extract(config, '$.name')) VIRTUAL NOT NULL,
                path TEXT GENERATED ALWAYS as (json_extract(config, '$.path')) VIRTUAL NOT NULL,
                format TEXT GENERATED ALWAYS as (json_extract(config, '$.format')) VIRTUAL NOT NULL,
                original_hash TEXT, -- could be null
                -- Serialized JSON representation of the whole config object,
                -- which will contain additional fields from subclasses
                config TEXT NOT NULL,
                created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
                -- Updated via trigger
                updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
                -- unique constraint on combo of name, base and type
                UNIQUE(name, base, type)
            );
            """
        )

    def _migrate_model_config_records(self, cursor: sqlite3.Cursor) -> None:
        """After updating the model config table, we repopulate it."""
        self._logger.info("Migrating model config records from models.yaml to database")
        model_record_migrator = MigrateModelYamlToDb1(self._app_config, self._logger, cursor)
        model_record_migrator.migrate()


def build_migration_3(app_config: InvokeAIAppConfig, logger: Logger) -> Migration:
    """
    Build the migration from database version 2 to 3.

    This migration does the following:
    - Drops the `model_config` table, recreating it
    - Migrates data from `models.yaml` into the `model_config` table
    """
    migration_3 = Migration(
        from_version=2,
        to_version=3,
        callback=Migration3Callback(app_config=app_config, logger=logger),
    )

    return migration_3
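A quick standalone check (not part of the migration) of how the generated columns in the `model_config` schema above behave: `name`, `base`, `type`, `path`, and `format` are computed from the `config` JSON, so inserting only `id` and `config` populates them automatically. This trimmed sketch omits the hash/timestamp columns and needs SQLite 3.31+ for generated-column support.

```python
import json
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE model_config (
        id TEXT NOT NULL PRIMARY KEY,
        base TEXT GENERATED ALWAYS AS (json_extract(config, '$.base')) VIRTUAL NOT NULL,
        type TEXT GENERATED ALWAYS AS (json_extract(config, '$.type')) VIRTUAL NOT NULL,
        name TEXT GENERATED ALWAYS AS (json_extract(config, '$.name')) VIRTUAL NOT NULL,
        path TEXT GENERATED ALWAYS AS (json_extract(config, '$.path')) VIRTUAL NOT NULL,
        format TEXT GENERATED ALWAYS AS (json_extract(config, '$.format')) VIRTUAL NOT NULL,
        config TEXT NOT NULL,
        UNIQUE(name, base, type)
    );
    """
)
config = {
    "base": "sd-1",
    "type": "main",
    "name": "stable-diffusion-v1-5",
    "path": "sd-1/main/sd15",
    "format": "diffusers",
}
conn.execute("INSERT INTO model_config (id, config) VALUES (?, ?)", ("abc123", json.dumps(config)))
print(conn.execute("SELECT name, base, format FROM model_config").fetchone())
# -> ('stable-diffusion-v1-5', 'sd-1', 'diffusers')
```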
@ -1,83 +0,0 @@
import sqlite3

from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration


class Migration4Callback:
    """Callback to do step 4 of migration."""

    def __call__(self, cursor: sqlite3.Cursor) -> None:  # noqa D102
        self._create_model_metadata(cursor)
        self._create_model_tags(cursor)
        self._create_tags(cursor)
        self._create_triggers(cursor)

    def _create_model_metadata(self, cursor: sqlite3.Cursor) -> None:
        """Create the table used to store model metadata downloaded from remote sources."""
        cursor.execute(
            """--sql
            CREATE TABLE IF NOT EXISTS model_metadata (
                id TEXT NOT NULL PRIMARY KEY,
                name TEXT GENERATED ALWAYS AS (json_extract(metadata, '$.name')) VIRTUAL NOT NULL,
                author TEXT GENERATED ALWAYS AS (json_extract(metadata, '$.author')) VIRTUAL NOT NULL,
                -- Serialized JSON representation of the whole metadata object,
                -- which will contain additional fields from subclasses
                metadata TEXT NOT NULL,
                created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
                -- Updated via trigger
                updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
                FOREIGN KEY(id) REFERENCES model_config(id) ON DELETE CASCADE
            );
            """
        )

    def _create_model_tags(self, cursor: sqlite3.Cursor) -> None:
        cursor.execute(
            """--sql
            CREATE TABLE IF NOT EXISTS model_tags (
                model_id TEXT NOT NULL,
                tag_id INTEGER NOT NULL,
                FOREIGN KEY(model_id) REFERENCES model_config(id) ON DELETE CASCADE,
                FOREIGN KEY(tag_id) REFERENCES tags(tag_id) ON DELETE CASCADE,
                UNIQUE(model_id,tag_id)
            );
            """
        )

    def _create_tags(self, cursor: sqlite3.Cursor) -> None:
        cursor.execute(
            """--sql
            CREATE TABLE IF NOT EXISTS tags (
                tag_id INTEGER NOT NULL PRIMARY KEY,
                tag_text TEXT NOT NULL UNIQUE
            );
            """
        )

    def _create_triggers(self, cursor: sqlite3.Cursor) -> None:
        cursor.execute(
            """--sql
            CREATE TRIGGER IF NOT EXISTS model_metadata_updated_at
            AFTER UPDATE
            ON model_metadata FOR EACH ROW
            BEGIN
                UPDATE model_metadata SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
                    WHERE id = old.id;
            END;
            """
        )


def build_migration_4() -> Migration:
    """
    Build the migration from database version 3 to 4.

    Adds the tables needed to store model metadata and tags.
    """
    migration_4 = Migration(
        from_version=3,
        to_version=4,
        callback=Migration4Callback(),
    )

    return migration_4
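The `tags` / `model_tags` / `model_config` schema above is what backs tag lookups such as `search_by_metadata_tag()`. The SQL below is an illustrative sketch of an "all of these tags" query against that schema, not the actual `ModelMetadataStore` implementation.

```python
import sqlite3
from typing import Set


def model_ids_with_all_tags(conn: sqlite3.Connection, tags: Set[str]) -> list[str]:
    if not tags:
        return []
    placeholders = ",".join("?" for _ in tags)
    rows = conn.execute(
        f"""
        SELECT mt.model_id
        FROM model_tags AS mt
        JOIN tags AS t ON t.tag_id = mt.tag_id
        WHERE t.tag_text IN ({placeholders})
        GROUP BY mt.model_id
        HAVING COUNT(DISTINCT t.tag_text) = ?;
        """,
        (*tags, len(tags)),
    ).fetchall()
    return [r[0] for r in rows]
```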
@ -1,974 +0,0 @@
|
||||
{
|
||||
"name": "Prompt from File",
|
||||
"author": "InvokeAI",
|
||||
"description": "Sample workflow using Prompt from File node",
|
||||
"version": "0.1.0",
|
||||
"contact": "invoke@invoke.ai",
|
||||
"tags": "text2image, prompt from file, default",
|
||||
"notes": "",
|
||||
"exposedFields": [
|
||||
{
|
||||
"nodeId": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||
"fieldName": "model"
|
||||
},
|
||||
{
|
||||
"nodeId": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
||||
"fieldName": "file_path"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"category": "default",
|
||||
"version": "2.0.0"
|
||||
},
|
||||
"nodes": [
|
||||
{
|
||||
"id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
||||
"type": "compel",
|
||||
"label": "",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"prompt": {
|
||||
"id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62",
|
||||
"name": "prompt",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": ""
|
||||
},
|
||||
"clip": {
|
||||
"id": "3f1981c9-d8a9-42eb-a739-4f120eb80745",
|
||||
"name": "clip",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"conditioning": {
|
||||
"id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a",
|
||||
"name": "conditioning",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 925,
|
||||
"y": -200
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
},
|
||||
{
|
||||
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
||||
"type": "prompt_from_file",
|
||||
"label": "Prompts from File",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.1",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"file_path": {
|
||||
"id": "37e37684-4f30-4ec8-beae-b333e550f904",
|
||||
"name": "file_path",
|
||||
"fieldKind": "input",
|
||||
"label": "Prompts File Path",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": ""
|
||||
},
|
||||
"pre_prompt": {
|
||||
"id": "7de02feb-819a-4992-bad3-72a30920ddea",
|
||||
"name": "pre_prompt",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": ""
|
||||
},
|
||||
"post_prompt": {
|
||||
"id": "95f191d8-a282-428e-bd65-de8cb9b7513a",
|
||||
"name": "post_prompt",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": ""
|
||||
},
|
||||
"start_line": {
|
||||
"id": "efee9a48-05ab-4829-8429-becfa64a0782",
|
||||
"name": "start_line",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 1
|
||||
},
|
||||
"max_prompts": {
|
||||
"id": "abebb428-3d3d-49fd-a482-4e96a16fff08",
|
||||
"name": "max_prompts",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 1
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"collection": {
|
||||
"id": "77d5d7f1-9877-4ab1-9a8c-33e9ffa9abf3",
|
||||
"name": "collection",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": true,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 475,
|
||||
"y": -400
|
||||
},
|
||||
"width": 320,
|
||||
"height": 506
|
||||
},
|
||||
{
|
||||
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||
"type": "iterate",
|
||||
"label": "",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.1.0",
|
||||
"inputs": {
|
||||
"collection": {
|
||||
"id": "4c564bf8-5ed6-441e-ad2c-dda265d5785f",
|
||||
"name": "collection",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": true,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "CollectionField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"item": {
|
||||
"id": "36340f9a-e7a5-4afa-b4b5-313f4e292380",
|
||||
"name": "item",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "CollectionItemField"
|
||||
}
|
||||
},
|
||||
"index": {
|
||||
"id": "1beca95a-2159-460f-97ff-c8bab7d89336",
|
||||
"name": "index",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
},
|
||||
"total": {
|
||||
"id": "ead597b8-108e-4eda-88a8-5c29fa2f8df9",
|
||||
"name": "total",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 925,
|
||||
"y": -400
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
},
|
||||
{
|
||||
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||
"type": "main_model_loader",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"model": {
|
||||
"id": "3f264259-3418-47d5-b90d-b6600e36ae46",
|
||||
"name": "model",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "MainModelField"
|
||||
},
|
||||
"value": {
|
||||
"model_name": "stable-diffusion-v1-5",
|
||||
"base_model": "sd-1",
|
||||
"model_type": "main"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"unet": {
|
||||
"id": "8e182ea2-9d0a-4c02-9407-27819288d4b5",
|
||||
"name": "unet",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
},
|
||||
"clip": {
|
||||
"id": "d67d9d30-058c-46d5-bded-3d09d6d1aa39",
|
||||
"name": "clip",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
},
|
||||
"vae": {
|
||||
"id": "89641601-0429-4448-98d5-190822d920d8",
|
||||
"name": "vae",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "VaeField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 0,
|
||||
"y": -375
|
||||
},
|
||||
"width": 320,
|
||||
"height": 193
|
||||
},
|
||||
{
|
||||
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||
"type": "compel",
|
||||
"label": "",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"prompt": {
|
||||
"id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62",
|
||||
"name": "prompt",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": ""
|
||||
},
|
||||
"clip": {
|
||||
"id": "3f1981c9-d8a9-42eb-a739-4f120eb80745",
|
||||
"name": "clip",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"conditioning": {
|
||||
"id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a",
|
||||
"name": "conditioning",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 925,
|
||||
"y": -275
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
},
|
||||
{
|
||||
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
||||
"type": "noise",
|
||||
"label": "",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.1",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"seed": {
|
||||
"id": "b722d84a-eeee-484f-bef2-0250c027cb67",
|
||||
"name": "seed",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"width": {
|
||||
"id": "d5f8ce11-0502-4bfc-9a30-5757dddf1f94",
|
||||
"name": "width",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 512
|
||||
},
|
||||
"height": {
|
||||
"id": "f187d5ff-38a5-4c3f-b780-fc5801ef34af",
|
||||
"name": "height",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 512
|
||||
},
|
||||
"use_cpu": {
|
||||
"id": "12f112b8-8b76-4816-b79e-662edc9f9aa5",
|
||||
"name": "use_cpu",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": true
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"noise": {
|
||||
"id": "08576ad1-96d9-42d2-96ef-6f5c1961933f",
|
||||
"name": "noise",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"width": {
|
||||
"id": "f3e1f94a-258d-41ff-9789-bd999bd9f40d",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
},
|
||||
"height": {
|
||||
"id": "6cefc357-4339-415e-a951-49b9c2be32f4",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 925,
|
||||
"y": 25
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
},
|
||||
{
|
||||
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
||||
"type": "rand_int",
|
||||
"label": "",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": false,
|
||||
"version": "1.0.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"low": {
|
||||
"id": "b9fc6cf1-469c-4037-9bf0-04836965826f",
|
||||
"name": "low",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"high": {
|
||||
"id": "06eac725-0f60-4ba2-b8cd-7ad9f757488c",
|
||||
"name": "high",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 2147483647
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"value": {
|
||||
"id": "df08c84e-7346-4e92-9042-9e5cb773aaff",
|
||||
"name": "value",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 925,
|
||||
"y": -50
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
},
|
||||
{
|
||||
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
||||
"type": "l2i",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.2.0",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"metadata": {
|
||||
"id": "022e4b33-562b-438d-b7df-41c3fd931f40",
|
||||
"name": "metadata",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "MetadataField"
|
||||
}
|
||||
},
|
||||
"latents": {
|
||||
"id": "67cb6c77-a394-4a66-a6a9-a0a7dcca69ec",
|
||||
"name": "latents",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"vae": {
|
||||
"id": "7b3fd9ad-a4ef-4e04-89fa-3832a9902dbd",
|
||||
"name": "vae",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "VaeField"
|
||||
}
|
||||
},
|
||||
"tiled": {
|
||||
"id": "5ac5680d-3add-4115-8ec0-9ef5bb87493b",
|
||||
"name": "tiled",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": false
|
||||
},
|
||||
"fp32": {
|
||||
"id": "db8297f5-55f8-452f-98cf-6572c2582152",
|
||||
"name": "fp32",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": false
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"id": "d8778d0c-592a-4960-9280-4e77e00a7f33",
|
||||
"name": "image",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ImageField"
|
||||
}
|
||||
},
|
||||
"width": {
|
||||
"id": "c8b0a75a-f5de-4ff2-9227-f25bb2b97bec",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
},
|
||||
"height": {
|
||||
"id": "83c05fbf-76b9-49ab-93c4-fa4b10e793e4",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 2037.861329274915,
|
||||
"y": -329.8393457509562
|
||||
},
|
||||
"width": 320,
|
||||
"height": 224
|
||||
},
|
||||
{
|
||||
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||
"type": "denoise_latents",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.5.1",
|
||||
"nodePack": "invokeai",
|
||||
"inputs": {
|
||||
"positive_conditioning": {
|
||||
"id": "751fb35b-3f23-45ce-af1c-053e74251337",
|
||||
"name": "positive_conditioning",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
},
|
||||
"negative_conditioning": {
|
||||
"id": "b9dc06b6-7481-4db1-a8c2-39d22a5eacff",
|
||||
"name": "negative_conditioning",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
},
|
||||
"noise": {
|
||||
"id": "6e15e439-3390-48a4-8031-01e0e19f0e1d",
|
||||
"name": "noise",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"steps": {
|
||||
"id": "bfdfb3df-760b-4d51-b17b-0abb38b976c2",
|
||||
"name": "steps",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 10
|
||||
},
|
||||
"cfg_scale": {
|
||||
"id": "47770858-322e-41af-8494-d8b63ed735f3",
|
||||
"name": "cfg_scale",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 7.5
|
||||
},
|
||||
"denoising_start": {
|
||||
"id": "2ba78720-ee02-4130-a348-7bc3531f790b",
|
||||
"name": "denoising_start",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"denoising_end": {
|
||||
"id": "a874dffb-d433-4d1a-9f59-af4367bb05e4",
|
||||
"name": "denoising_end",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 1
|
||||
},
|
||||
"scheduler": {
|
||||
"id": "36e021ad-b762-4fe4-ad4d-17f0291c40b2",
|
||||
"name": "scheduler",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "SchedulerField"
|
||||
},
|
||||
"value": "euler"
|
||||
},
|
||||
"unet": {
|
||||
"id": "98d3282d-f9f6-4b5e-b9e8-58658f1cac78",
|
||||
"name": "unet",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
},
|
||||
"control": {
|
||||
"id": "f2ea3216-43d5-42b4-887f-36e8f7166d53",
|
||||
"name": "control",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "ControlField"
|
||||
}
|
||||
},
|
||||
"ip_adapter": {
|
||||
"id": "d0780610-a298-47c8-a54e-70e769e0dfe2",
|
||||
"name": "ip_adapter",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "IPAdapterField"
|
||||
}
|
||||
},
|
||||
"t2i_adapter": {
|
||||
"id": "fdb40970-185e-4ea8-8bb5-88f06f91f46a",
|
||||
"name": "t2i_adapter",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "T2IAdapterField"
|
||||
}
|
||||
},
|
||||
"cfg_rescale_multiplier": {
|
||||
"id": "3af2d8c5-de83-425c-a100-49cb0f1f4385",
|
||||
"name": "cfg_rescale_multiplier",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"latents": {
|
||||
"id": "e05b538a-1b5a-4aa5-84b1-fd2361289a81",
|
||||
"name": "latents",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"denoise_mask": {
|
||||
"id": "463a419e-df30-4382-8ffb-b25b25abe425",
|
||||
"name": "denoise_mask",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "DenoiseMaskField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"latents": {
|
||||
"id": "559ee688-66cf-4139-8b82-3d3aa69995ce",
|
||||
"name": "latents",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"width": {
|
||||
"id": "0b4285c2-e8b9-48e5-98f6-0a49d3f98fd2",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
},
|
||||
"height": {
|
||||
"id": "8b0881b9-45e5-47d5-b526-24b6661de0ee",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 1570.9941088179146,
|
||||
"y": -407.6505491604564
|
||||
},
|
||||
"width": 320,
|
||||
"height": 612
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "1b89067c-3f6b-42c8-991f-e3055789b251-fc9d0e35-a6de-4a19-84e1-c72497c823f6-collapsed",
|
||||
"type": "collapsed",
|
||||
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6"
|
||||
},
|
||||
{
|
||||
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77-collapsed",
|
||||
"type": "collapsed",
|
||||
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
||||
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-1b7e0df8-8589-4915-a4ea-c0088f15d642collection-1b89067c-3f6b-42c8-991f-e3055789b251collection",
|
||||
"type": "default",
|
||||
"source": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
||||
"target": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||
"sourceHandle": "collection",
|
||||
"targetHandle": "collection"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-fc9d0e35-a6de-4a19-84e1-c72497c823f6clip",
|
||||
"type": "default",
|
||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-1b89067c-3f6b-42c8-991f-e3055789b251item-fc9d0e35-a6de-4a19-84e1-c72497c823f6prompt",
|
||||
"type": "default",
|
||||
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||
"sourceHandle": "item",
|
||||
"targetHandle": "prompt"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-c2eaf1ba-5708-4679-9e15-945b8b432692clip",
|
||||
"type": "default",
|
||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||
"target": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5value-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77seed",
|
||||
"type": "default",
|
||||
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
||||
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "seed"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-fc9d0e35-a6de-4a19-84e1-c72497c823f6conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5epositive_conditioning",
|
||||
"type": "default",
|
||||
"source": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "positive_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c2eaf1ba-5708-4679-9e15-945b8b432692conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enegative_conditioning",
|
||||
"type": "default",
|
||||
"source": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77noise-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enoise",
|
||||
"type": "default",
|
||||
"source": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||
"sourceHandle": "noise",
|
||||
"targetHandle": "noise"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426unet-2fb1577f-0a56-4f12-8711-8afcaaaf1d5eunet",
|
||||
"type": "default",
|
||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||
"sourceHandle": "unet",
|
||||
"targetHandle": "unet"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-2fb1577f-0a56-4f12-8711-8afcaaaf1d5elatents-491ec988-3c77-4c37-af8a-39a0c4e7a2a1latents",
|
||||
"type": "default",
|
||||
"source": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426vae-491ec988-3c77-4c37-af8a-39a0c4e7a2a1vae",
|
||||
"type": "default",
|
||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
}
|
||||
]
|
||||
}
|
@ -1,902 +0,0 @@
|
||||
{
|
||||
"name": "Text to Image with LoRA",
|
||||
"author": "InvokeAI",
|
||||
"description": "Simple text to image workflow with a LoRA",
|
||||
"version": "1.0.0",
|
||||
"contact": "invoke@invoke.ai",
|
||||
"tags": "text to image, lora, default",
|
||||
"notes": "",
|
||||
"exposedFields": [
|
||||
{
|
||||
"nodeId": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||
"fieldName": "model"
|
||||
},
|
||||
{
|
||||
"nodeId": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"fieldName": "lora"
|
||||
},
|
||||
{
|
||||
"nodeId": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"fieldName": "weight"
|
||||
},
|
||||
{
|
||||
"nodeId": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||
"fieldName": "prompt"
|
||||
}
|
||||
],
|
||||
"meta": {
|
||||
"category": "default",
|
||||
"version": "2.0.0"
|
||||
},
|
||||
"nodes": [
|
||||
{
|
||||
"id": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
||||
"type": "compel",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"inputs": {
|
||||
"prompt": {
|
||||
"id": "39fe92c4-38eb-4cc7-bf5e-cbcd31847b11",
|
||||
"name": "prompt",
|
||||
"fieldKind": "input",
|
||||
"label": "Negative Prompt",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": ""
|
||||
},
|
||||
"clip": {
|
||||
"id": "14313164-e5c4-4e40-a599-41b614fe3690",
|
||||
"name": "clip",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"conditioning": {
|
||||
"id": "02140b9d-50f3-470b-a0b7-01fc6ed2dcd6",
|
||||
"name": "conditioning",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 3425,
|
||||
"y": -300
|
||||
},
|
||||
"width": 320,
|
||||
"height": 219
|
||||
},
|
||||
{
|
||||
"id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||
"type": "main_model_loader",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"inputs": {
|
||||
"model": {
|
||||
"id": "e2e1c177-ae39-4244-920e-d621fa156a24",
|
||||
"name": "model",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "MainModelField"
|
||||
},
|
||||
"value": {
|
||||
"model_name": "Analog-Diffusion",
|
||||
"base_model": "sd-1",
|
||||
"model_type": "main"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"vae": {
|
||||
"id": "f91410e8-9378-4298-b285-f0f40ffd9825",
|
||||
"name": "vae",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "VaeField"
|
||||
}
|
||||
},
|
||||
"clip": {
|
||||
"id": "928d91bf-de0c-44a8-b0c8-4de0e2e5b438",
|
||||
"name": "clip",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
},
|
||||
"unet": {
|
||||
"id": "eacaf530-4e7e-472e-b904-462192189fc1",
|
||||
"name": "unet",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 2500,
|
||||
"y": -600
|
||||
},
|
||||
"width": 320,
|
||||
"height": 193
|
||||
},
|
||||
{
|
||||
"id": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"type": "lora_loader",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"inputs": {
|
||||
"lora": {
|
||||
"id": "36d867e8-92ea-4c3f-9ad5-ba05c64cf326",
|
||||
"name": "lora",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LoRAModelField"
|
||||
},
|
||||
"value": {
|
||||
"model_name": "Ink scenery",
|
||||
"base_model": "sd-1"
|
||||
}
|
||||
},
|
||||
"weight": {
|
||||
"id": "8be86540-ba81-49b3-b394-2b18fa70b867",
|
||||
"name": "weight",
|
||||
"fieldKind": "input",
|
||||
"label": "LoRA Weight",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 0.75
|
||||
},
|
||||
"unet": {
|
||||
"id": "9c4d5668-e9e1-411b-8f4b-e71115bc4a01",
|
||||
"name": "unet",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
},
|
||||
"clip": {
|
||||
"id": "918ec00e-e76f-4ad0-aee1-3927298cf03b",
|
||||
"name": "clip",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"unet": {
|
||||
"id": "c63f7825-1bcf-451d-b7a7-aa79f5c77416",
|
||||
"name": "unet",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
},
|
||||
"clip": {
|
||||
"id": "6f79ef2d-00f7-4917-bee3-53e845bf4192",
|
||||
"name": "clip",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 2975,
|
||||
"y": -600
|
||||
},
|
||||
"width": 320,
|
||||
"height": 218
|
||||
},
|
||||
{
|
||||
"id": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||
"type": "compel",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.0",
|
||||
"inputs": {
|
||||
"prompt": {
|
||||
"id": "39fe92c4-38eb-4cc7-bf5e-cbcd31847b11",
|
||||
"name": "prompt",
|
||||
"fieldKind": "input",
|
||||
"label": "Positive Prompt",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "StringField"
|
||||
},
|
||||
"value": "cute tiger cub"
|
||||
},
|
||||
"clip": {
|
||||
"id": "14313164-e5c4-4e40-a599-41b614fe3690",
|
||||
"name": "clip",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ClipField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"conditioning": {
|
||||
"id": "02140b9d-50f3-470b-a0b7-01fc6ed2dcd6",
|
||||
"name": "conditioning",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 3425,
|
||||
"y": -575
|
||||
},
|
||||
"width": 320,
|
||||
"height": 219
|
||||
},
|
||||
{
|
||||
"id": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||
"type": "denoise_latents",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.5.1",
|
||||
"inputs": {
|
||||
"positive_conditioning": {
|
||||
"id": "025ff44b-c4c6-4339-91b4-5f461e2cadc5",
|
||||
"name": "positive_conditioning",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
},
|
||||
"negative_conditioning": {
|
||||
"id": "2d92b45a-a7fb-4541-9a47-7c7495f50f54",
|
||||
"name": "negative_conditioning",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ConditioningField"
|
||||
}
|
||||
},
|
||||
"noise": {
|
||||
"id": "4d0deeff-24ed-4562-a1ca-7833c0649377",
|
||||
"name": "noise",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"steps": {
|
||||
"id": "c9907328-aece-4af9-8a95-211b4f99a325",
|
||||
"name": "steps",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 10
|
||||
},
|
||||
"cfg_scale": {
|
||||
"id": "7cf0f031-2078-49f4-9273-bb3a64ad7130",
|
||||
"name": "cfg_scale",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 7.5
|
||||
},
|
||||
"denoising_start": {
|
||||
"id": "44cec3ba-b404-4b51-ba98-add9d783279e",
|
||||
"name": "denoising_start",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"denoising_end": {
|
||||
"id": "3e7975f3-e438-4a13-8a14-395eba1fb7cd",
|
||||
"name": "denoising_end",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 1
|
||||
},
|
||||
"scheduler": {
|
||||
"id": "a6f6509b-7bb4-477d-b5fb-74baefa38111",
|
||||
"name": "scheduler",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "SchedulerField"
|
||||
},
|
||||
"value": "euler"
|
||||
},
|
||||
"unet": {
|
||||
"id": "5a87617a-b09f-417b-9b75-0cea4c255227",
|
||||
"name": "unet",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "UNetField"
|
||||
}
|
||||
},
|
||||
"control": {
|
||||
"id": "db87aace-ace8-4f2a-8f2b-1f752389fa9b",
|
||||
"name": "control",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "ControlField"
|
||||
}
|
||||
},
|
||||
"ip_adapter": {
|
||||
"id": "f0c133ed-4d6d-4567-bb9a-b1779810993c",
|
||||
"name": "ip_adapter",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "IPAdapterField"
|
||||
}
|
||||
},
|
||||
"t2i_adapter": {
|
||||
"id": "59ee1233-887f-45e7-aa14-cbad5f6cb77f",
|
||||
"name": "t2i_adapter",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": true,
|
||||
"name": "T2IAdapterField"
|
||||
}
|
||||
},
|
||||
"cfg_rescale_multiplier": {
|
||||
"id": "1a12e781-4b30-4707-b432-18c31866b5c3",
|
||||
"name": "cfg_rescale_multiplier",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "FloatField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"latents": {
|
||||
"id": "d0e593ae-305c-424b-9acd-3af830085832",
|
||||
"name": "latents",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"denoise_mask": {
|
||||
"id": "b81b5a79-fc2b-4011-aae6-64c92bae59a7",
|
||||
"name": "denoise_mask",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "DenoiseMaskField"
|
||||
}
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"latents": {
|
||||
"id": "9ae4022a-548e-407e-90cf-cc5ca5ff8a21",
|
||||
"name": "latents",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"width": {
|
||||
"id": "730ba4bd-2c52-46bb-8c87-9b3aec155576",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
},
|
||||
"height": {
|
||||
"id": "52b98f0b-b5ff-41b5-acc7-d0b1d1011a6f",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 3975,
|
||||
"y": -575
|
||||
},
|
||||
"width": 320,
|
||||
"height": 612
|
||||
},
|
||||
{
|
||||
"id": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
||||
"type": "noise",
|
||||
"label": "",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": true,
|
||||
"version": "1.0.1",
|
||||
"inputs": {
|
||||
"seed": {
|
||||
"id": "446ac80c-ba0a-4fea-a2d7-21128f52e5bf",
|
||||
"name": "seed",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"width": {
|
||||
"id": "779831b3-20b4-4f5f-9de7-d17de57288d8",
|
||||
"name": "width",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 512
|
||||
},
|
||||
"height": {
|
||||
"id": "08959766-6d67-4276-b122-e54b911f2316",
|
||||
"name": "height",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 512
|
||||
},
|
||||
"use_cpu": {
|
||||
"id": "53b36a98-00c4-4dc5-97a4-ef3432c0a805",
|
||||
"name": "use_cpu",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": true
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"noise": {
|
||||
"id": "eed95824-580b-442f-aa35-c073733cecce",
|
||||
"name": "noise",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"width": {
|
||||
"id": "7985a261-dfee-47a8-908a-c5a8754f5dc4",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
},
|
||||
"height": {
|
||||
"id": "3d00f6c1-84b0-4262-83d9-3bf755babeea",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 3425,
|
||||
"y": 75
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
},
|
||||
{
|
||||
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
||||
"type": "rand_int",
|
||||
"label": "",
|
||||
"isOpen": false,
|
||||
"notes": "",
|
||||
"isIntermediate": true,
|
||||
"useCache": false,
|
||||
"version": "1.0.0",
|
||||
"inputs": {
|
||||
"low": {
|
||||
"id": "d25305f3-bfd6-446c-8e2c-0b025ec9e9ad",
|
||||
"name": "low",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 0
|
||||
},
|
||||
"high": {
|
||||
"id": "10376a3d-b8fe-4a51-b81a-ea46d8c12c78",
|
||||
"name": "high",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
},
|
||||
"value": 2147483647
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"value": {
|
||||
"id": "c64878fa-53b1-4202-b88a-cfb854216a57",
|
||||
"name": "value",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 3425,
|
||||
"y": 0
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
},
|
||||
{
|
||||
"id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
||||
"type": "invocation",
|
||||
"data": {
|
||||
"id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
||||
"type": "l2i",
|
||||
"label": "",
|
||||
"isOpen": true,
|
||||
"notes": "",
|
||||
"isIntermediate": false,
|
||||
"useCache": true,
|
||||
"version": "1.2.0",
|
||||
"inputs": {
|
||||
"metadata": {
|
||||
"id": "b1982e8a-14ad-4029-a697-beb30af8340f",
|
||||
"name": "metadata",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "MetadataField"
|
||||
}
|
||||
},
|
||||
"latents": {
|
||||
"id": "f7669388-9f91-46cc-94fc-301fa7041c3e",
|
||||
"name": "latents",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "LatentsField"
|
||||
}
|
||||
},
|
||||
"vae": {
|
||||
"id": "c6f2d4db-4d0a-4e3d-acb4-b5c5a228a3e2",
|
||||
"name": "vae",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "VaeField"
|
||||
}
|
||||
},
|
||||
"tiled": {
|
||||
"id": "19ef7d31-d96f-4e94-b7e5-95914e9076fc",
|
||||
"name": "tiled",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": false
|
||||
},
|
||||
"fp32": {
|
||||
"id": "a9454533-8ab7-4225-b411-646dc5e76d00",
|
||||
"name": "fp32",
|
||||
"fieldKind": "input",
|
||||
"label": "",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "BooleanField"
|
||||
},
|
||||
"value": false
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"id": "4f81274e-e216-47f3-9fb6-f97493a40e6f",
|
||||
"name": "image",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "ImageField"
|
||||
}
|
||||
},
|
||||
"width": {
|
||||
"id": "61a9acfb-1547-4f1e-8214-e89bd3855ee5",
|
||||
"name": "width",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
},
|
||||
"height": {
|
||||
"id": "b15cc793-4172-4b07-bcf4-5627bbc7d0d7",
|
||||
"name": "height",
|
||||
"fieldKind": "output",
|
||||
"type": {
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "IntegerField"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"position": {
|
||||
"x": 4450,
|
||||
"y": -550
|
||||
},
|
||||
"width": 320,
|
||||
"height": 224
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953-ea18915f-2c5b-4569-b725-8e9e9122e8d3-collapsed",
|
||||
"type": "collapsed",
|
||||
"source": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
||||
"target": "ea18915f-2c5b-4569-b725-8e9e9122e8d3"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818clip-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip",
|
||||
"type": "default",
|
||||
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||
"target": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip-c3fa6872-2599-4a82-a596-b3446a66cf8bclip",
|
||||
"type": "default",
|
||||
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"target": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818unet-c41e705b-f2e3-4d1a-83c4-e34bb9344966unet",
|
||||
"type": "default",
|
||||
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||
"target": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"sourceHandle": "unet",
|
||||
"targetHandle": "unet"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966unet-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63unet",
|
||||
"type": "default",
|
||||
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||
"sourceHandle": "unet",
|
||||
"targetHandle": "unet"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-85b77bb2-c67a-416a-b3e8-291abe746c44conditioning-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63negative_conditioning",
|
||||
"type": "default",
|
||||
"source": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c3fa6872-2599-4a82-a596-b3446a66cf8bconditioning-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63positive_conditioning",
|
||||
"type": "default",
|
||||
"source": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "positive_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ea18915f-2c5b-4569-b725-8e9e9122e8d3noise-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63noise",
|
||||
"type": "default",
|
||||
"source": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||
"sourceHandle": "noise",
|
||||
"targetHandle": "noise"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-6fd74a17-6065-47a5-b48b-f4e2b8fa7953value-ea18915f-2c5b-4569-b725-8e9e9122e8d3seed",
|
||||
"type": "default",
|
||||
"source": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
||||
"target": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "seed"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63latents-a9683c0a-6b1f-4a5e-8187-c57e764b3400latents",
|
||||
"type": "default",
|
||||
"source": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||
"target": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818vae-a9683c0a-6b1f-4a5e-8187-c57e764b3400vae",
|
||||
"type": "default",
|
||||
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||
"target": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip-85b77bb2-c67a-416a-b3e8-291abe746c44clip",
|
||||
"type": "default",
|
||||
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||
"target": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
}
|
||||
]
|
||||
}
|
@ -84,12 +84,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 259,
|
||||
"position": {
|
||||
"x": 1000,
|
||||
"y": 350
|
||||
},
|
||||
"width": 320,
|
||||
"height": 219
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
@ -187,12 +187,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 388,
|
||||
"position": {
|
||||
"x": 600,
|
||||
"y": 325
|
||||
},
|
||||
"width": 320,
|
||||
"height": 388
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
@ -258,12 +258,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 226,
|
||||
"position": {
|
||||
"x": 600,
|
||||
"y": 25
|
||||
},
|
||||
"width": 320,
|
||||
"height": 193
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||
@ -316,12 +316,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 259,
|
||||
"position": {
|
||||
"x": 1000,
|
||||
"y": 25
|
||||
},
|
||||
"width": 320,
|
||||
"height": 219
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||
@ -375,12 +375,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32,
|
||||
"position": {
|
||||
"x": 600,
|
||||
"y": 275
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
@ -393,7 +393,7 @@
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.5.1",
"version": "1.5.0",
"nodePack": "invokeai",
"inputs": {
"positive_conditioning": {
|
||||
@ -601,12 +601,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 703,
|
||||
"position": {
|
||||
"x": 1400,
|
||||
"y": 25
|
||||
},
|
||||
"width": 320,
|
||||
"height": 612
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||
@ -713,86 +713,86 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 266,
|
||||
"position": {
|
||||
"x": 1800,
|
||||
"y": 25
|
||||
},
|
||||
"width": 320,
|
||||
"height": 224
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
||||
"type": "default",
|
||||
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "seed"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-7d8bf987-284f-413a-b2fd-d825445a5d6cclip",
|
||||
"type": "default",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"target": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-93dc02a4-d05b-48ed-b99c-c9b616af3402clip",
|
||||
"type": "default",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"target": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise",
|
||||
"type": "default",
|
||||
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "noise",
|
||||
"targetHandle": "noise"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning",
|
||||
"type": "default",
|
||||
"source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "positive_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning",
|
||||
"type": "default",
|
||||
"source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet",
|
||||
"type": "default",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"type": "default",
|
||||
"sourceHandle": "unet",
|
||||
"targetHandle": "unet"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents",
|
||||
"type": "default",
|
||||
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||
"type": "default",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae",
|
||||
"type": "default",
|
||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||
"type": "default",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -80,12 +80,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32,
|
||||
"position": {
|
||||
"x": 750,
|
||||
"y": -225
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||
@ -126,12 +126,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 258,
|
||||
"position": {
|
||||
"x": 750,
|
||||
"y": -125
|
||||
},
|
||||
"width": 320,
|
||||
"height": 219
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
@ -279,12 +279,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32,
|
||||
"position": {
|
||||
"x": 750,
|
||||
"y": 200
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
@ -382,12 +382,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 388,
|
||||
"position": {
|
||||
"x": 375,
|
||||
"y": 0
|
||||
},
|
||||
"width": 320,
|
||||
"height": 336
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||
@ -441,12 +441,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32,
|
||||
"position": {
|
||||
"x": 375,
|
||||
"y": -50
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||
@ -471,7 +471,8 @@
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "SDXLMainModelField"
|
||||
}
|
||||
},
|
||||
"value": null
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
@ -517,12 +518,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 257,
|
||||
"position": {
|
||||
"x": 375,
|
||||
"y": -500
|
||||
},
|
||||
"width": 320,
|
||||
"height": 219
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
@ -670,12 +671,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32,
|
||||
"position": {
|
||||
"x": 750,
|
||||
"y": -175
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||
@ -782,12 +783,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 266,
|
||||
"position": {
|
||||
"x": 1475,
|
||||
"y": -500
|
||||
},
|
||||
"width": 320,
|
||||
"height": 224
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
@ -800,7 +801,7 @@
"notes": "",
"isIntermediate": true,
"useCache": true,
"version": "1.5.1",
"version": "1.5.0",
"nodePack": "invokeai",
"inputs": {
"positive_conditioning": {
|
||||
@ -1008,12 +1009,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 702,
|
||||
"position": {
|
||||
"x": 1125,
|
||||
"y": -500
|
||||
},
|
||||
"width": 320,
|
||||
"height": 612
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
|
||||
@ -1037,7 +1038,8 @@
|
||||
"isCollection": false,
|
||||
"isCollectionOrScalar": false,
|
||||
"name": "VAEModelField"
|
||||
}
|
||||
},
|
||||
"value": null
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
@ -1053,12 +1055,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 161,
|
||||
"position": {
|
||||
"x": 375,
|
||||
"y": -225
|
||||
},
|
||||
"width": 320,
|
||||
"height": 139
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||
@ -1099,12 +1101,12 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 258,
|
||||
"position": {
|
||||
"x": 750,
|
||||
"y": -500
|
||||
},
|
||||
"width": 320,
|
||||
"height": 219
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||
@ -1157,162 +1159,162 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"width": 320,
|
||||
"height": 32,
|
||||
"position": {
|
||||
"x": 750,
|
||||
"y": 150
|
||||
},
|
||||
"width": 320,
|
||||
"height": 24
|
||||
}
|
||||
}
|
||||
],
|
||||
"edges": [
|
||||
{
|
||||
"id": "3774ec24-a69e-4254-864c-097d07a6256f-faf965a4-7530-427b-b1f3-4ba6505c2a08-collapsed",
|
||||
"type": "collapsed",
|
||||
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08"
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"type": "collapsed"
|
||||
},
|
||||
{
|
||||
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204-collapsed",
|
||||
"type": "collapsed",
|
||||
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204"
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"type": "collapsed"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
||||
"type": "default",
|
||||
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "seed"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-faf965a4-7530-427b-b1f3-4ba6505c2a08clip",
|
||||
"type": "default",
|
||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-faf965a4-7530-427b-b1f3-4ba6505c2a08clip2",
|
||||
"type": "default",
|
||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip2",
|
||||
"targetHandle": "clip2"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip",
|
||||
"type": "default",
|
||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip",
|
||||
"targetHandle": "clip"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip2",
|
||||
"type": "default",
|
||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"type": "default",
|
||||
"sourceHandle": "clip2",
|
||||
"targetHandle": "clip2"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbunet",
|
||||
"type": "default",
|
||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"type": "default",
|
||||
"sourceHandle": "unet",
|
||||
"targetHandle": "unet"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbpositive_conditioning",
|
||||
"type": "default",
|
||||
"source": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"type": "default",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "positive_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnegative_conditioning",
|
||||
"type": "default",
|
||||
"source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"type": "default",
|
||||
"sourceHandle": "conditioning",
|
||||
"targetHandle": "negative_conditioning"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnoise",
|
||||
"type": "default",
|
||||
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"type": "default",
|
||||
"sourceHandle": "noise",
|
||||
"targetHandle": "noise"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfblatents-63e91020-83b2-4f35-b174-ad9692aabb48latents",
|
||||
"type": "default",
|
||||
"source": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||
"type": "default",
|
||||
"sourceHandle": "latents",
|
||||
"targetHandle": "latents"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-0093692f-9cf4-454d-a5b8-62f0e3eb3bb8vae-63e91020-83b2-4f35-b174-ad9692aabb48vae",
|
||||
"type": "default",
|
||||
"source": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
|
||||
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||
"type": "default",
|
||||
"sourceHandle": "vae",
|
||||
"targetHandle": "vae"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-faf965a4-7530-427b-b1f3-4ba6505c2a08prompt",
|
||||
"type": "default",
|
||||
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "prompt"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204prompt",
|
||||
"type": "default",
|
||||
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "prompt"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-ad8fa655-3a76-43d0-9c02-4d7644dea650string_left",
|
||||
"type": "default",
|
||||
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||
"target": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "string_left"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ad8fa655-3a76-43d0-9c02-4d7644dea650value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204style",
|
||||
"type": "default",
|
||||
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "style"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-3774ec24-a69e-4254-864c-097d07a6256fstring_left",
|
||||
"type": "default",
|
||||
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||
"target": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "string_left"
|
||||
},
|
||||
{
|
||||
"id": "reactflow__edge-3774ec24-a69e-4254-864c-097d07a6256fvalue-faf965a4-7530-427b-b1f3-4ba6505c2a08style",
|
||||
"type": "default",
|
||||
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||
"type": "default",
|
||||
"sourceHandle": "value",
|
||||
"targetHandle": "style"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -31,7 +31,6 @@ class WorkflowRecordOrderBy(str, Enum, metaclass=MetaEnum):
class WorkflowCategory(str, Enum, metaclass=MetaEnum):
    User = "user"
    Default = "default"
    Project = "project"


class WorkflowMeta(BaseModel):
|
||||
@ -66,24 +65,12 @@ class WorkflowWithoutID(BaseModel):
    nodes: list[dict[str, JsonValue]] = Field(description="The nodes of the workflow.")
    edges: list[dict[str, JsonValue]] = Field(description="The edges of the workflow.")

    model_config = ConfigDict(extra="ignore")
    model_config = ConfigDict(extra="forbid")


WorkflowWithoutIDValidator = TypeAdapter(WorkflowWithoutID)


class UnsafeWorkflowWithVersion(BaseModel):
    """
    This utility model only requires a workflow to have a valid version string.
    It is used to validate a workflow version without having to validate the entire workflow.
    """

    meta: WorkflowMeta = Field(description="The meta of the workflow.")


UnsafeWorkflowWithVersionValidator = TypeAdapter(UnsafeWorkflowWithVersion)


class Workflow(WorkflowWithoutID):
    id: str = Field(description="The id of the workflow.")
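For context: the `UnsafeWorkflowWithVersion` helper above exists so a caller can check a stored workflow's version before running full validation. Below is a minimal sketch of that pattern, not taken from this diff; it assumes `meta.version` is the relevant field, and `raw`, `migrate_workflow`, and the version cut-off are hypothetical.

```python
from packaging.version import Version

def load_workflow(raw: dict) -> WorkflowWithoutID:
    # Cheap check first: only meta.version has to parse; nodes/edges are not inspected.
    unsafe = UnsafeWorkflowWithVersionValidator.validate_python(raw)
    if Version(unsafe.meta.version) < Version("2.0.0"):
        raw = migrate_workflow(raw)  # hypothetical migration step
    # Full validation runs only after any migration has been applied.
    return WorkflowWithoutIDValidator.validate_python(raw)
```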
|
||||
|
||||
|
@ -169,7 +169,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):

            self._cursor.execute(count_query, count_params)
            total = self._cursor.fetchone()[0]
            pages = total // per_page + (total % per_page > 0)
            pages = int(total / per_page) + 1

            return PaginatedResults(
                items=workflows,
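The two `pages` lines in the hunk above compute the page count differently: the floor-division form is a ceiling division, while `int(total / per_page) + 1` relies on float division and reports one extra, empty page whenever `total` is an exact multiple of `per_page` (and one page even when `total` is zero). A quick illustration:

```python
def pages_ceil(total: int, per_page: int) -> int:
    return total // per_page + (total % per_page > 0)

def pages_naive(total: int, per_page: int) -> int:
    return int(total / per_page) + 1

pages_ceil(21, 10), pages_naive(21, 10)  # (3, 3) -- they agree when there is a remainder
pages_ceil(20, 10), pages_naive(20, 10)  # (2, 3) -- the naive form adds an empty trailing page
pages_ceil(0, 10), pages_naive(0, 10)    # (0, 1)
```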
|
||||
|
@ -1,8 +0,0 @@
import re


def extract_ti_triggers_from_prompt(prompt: str) -> list[str]:
    ti_triggers = []
    for trigger in re.findall(r"<[a-zA-Z0-9., _-]+>", prompt):
        ti_triggers.append(trigger)
    return ti_triggers
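For reference, here is what the removed helper returns for a hypothetical prompt: it simply collects angle-bracketed trigger tokens, such as textual-inversion embedding names.

```python
prompt = "a portrait of <easynegative>, photo, <my_style-v2>, 4k"
extract_ti_triggers_from_prompt(prompt)
# -> ['<easynegative>', '<my_style-v2>']
```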
|
@ -1,109 +0,0 @@
|
||||
import pathlib
|
||||
from typing import Literal, Union
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from einops import repeat
|
||||
from PIL import Image
|
||||
from torchvision.transforms import Compose
|
||||
|
||||
from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
|
||||
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
|
||||
from invokeai.backend.util.devices import choose_torch_device
|
||||
from invokeai.backend.util.util import download_with_progress_bar
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
|
||||
DEPTH_ANYTHING_MODELS = {
|
||||
"large": {
|
||||
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true",
|
||||
"local": "any/annotators/depth_anything/depth_anything_vitl14.pth",
|
||||
},
|
||||
"base": {
|
||||
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitb14.pth?download=true",
|
||||
"local": "any/annotators/depth_anything/depth_anything_vitb14.pth",
|
||||
},
|
||||
"small": {
|
||||
"url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vits14.pth?download=true",
|
||||
"local": "any/annotators/depth_anything/depth_anything_vits14.pth",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
transform = Compose(
|
||||
[
|
||||
Resize(
|
||||
width=518,
|
||||
height=518,
|
||||
resize_target=False,
|
||||
keep_aspect_ratio=True,
|
||||
ensure_multiple_of=14,
|
||||
resize_method="lower_bound",
|
||||
image_interpolation_method=cv2.INTER_CUBIC,
|
||||
),
|
||||
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
||||
PrepareForNet(),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
class DepthAnythingDetector:
|
||||
def __init__(self) -> None:
|
||||
self.model = None
|
||||
self.model_size: Union[Literal["large", "base", "small"], None] = None
|
||||
|
||||
def load_model(self, model_size=Literal["large", "base", "small"]):
|
||||
DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"])
|
||||
if not DEPTH_ANYTHING_MODEL_PATH.exists():
|
||||
download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH)
|
||||
|
||||
if not self.model or model_size != self.model_size:
|
||||
del self.model
|
||||
self.model_size = model_size
|
||||
|
||||
match self.model_size:
|
||||
case "small":
|
||||
self.model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
|
||||
case "base":
|
||||
self.model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
|
||||
case "large":
|
||||
self.model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
|
||||
case _:
|
||||
raise TypeError("Not a supported model")
|
||||
|
||||
self.model.load_state_dict(torch.load(DEPTH_ANYTHING_MODEL_PATH.as_posix(), map_location="cpu"))
|
||||
self.model.eval()
|
||||
|
||||
self.model.to(choose_torch_device())
|
||||
return self.model
|
||||
|
||||
def to(self, device):
|
||||
self.model.to(device)
|
||||
return self
|
||||
|
||||
def __call__(self, image, resolution=512, offload=False):
|
||||
image = np.array(image, dtype=np.uint8)
|
||||
image = image[:, :, ::-1] / 255.0
|
||||
|
||||
image_height, image_width = image.shape[:2]
|
||||
image = transform({"image": image})["image"]
|
||||
image = torch.from_numpy(image).unsqueeze(0).to(choose_torch_device())
|
||||
|
||||
with torch.no_grad():
|
||||
depth = self.model(image)
|
||||
depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0]
|
||||
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
|
||||
|
||||
depth_map = repeat(depth, "h w -> h w 3").cpu().numpy().astype(np.uint8)
|
||||
depth_map = Image.fromarray(depth_map)
|
||||
|
||||
new_height = int(image_height * (resolution / image_width))
|
||||
depth_map = depth_map.resize((resolution, new_height))
|
||||
|
||||
if offload:
|
||||
del self.model
|
||||
|
||||
return depth_map
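For context, a minimal usage sketch of the removed `DepthAnythingDetector`; this is illustrative only, the input path is hypothetical, and `load_model` downloads the checkpoint on first use.

```python
from PIL import Image

detector = DepthAnythingDetector()
detector.load_model(model_size="small")  # fetches the checkpoint if it is not cached locally

image = Image.open("input.png").convert("RGB")  # hypothetical input image
depth_map = detector(image, resolution=512)     # PIL image whose width equals `resolution`
depth_map.save("depth.png")
```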
|
@ -1,145 +0,0 @@
|
||||
import torch.nn as nn
|
||||
|
||||
|
||||
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
|
||||
scratch = nn.Module()
|
||||
|
||||
out_shape1 = out_shape
|
||||
out_shape2 = out_shape
|
||||
out_shape3 = out_shape
|
||||
if len(in_shape) >= 4:
|
||||
out_shape4 = out_shape
|
||||
|
||||
if expand:
|
||||
out_shape1 = out_shape
|
||||
out_shape2 = out_shape * 2
|
||||
out_shape3 = out_shape * 4
|
||||
if len(in_shape) >= 4:
|
||||
out_shape4 = out_shape * 8
|
||||
|
||||
scratch.layer1_rn = nn.Conv2d(
|
||||
in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
scratch.layer2_rn = nn.Conv2d(
|
||||
in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
scratch.layer3_rn = nn.Conv2d(
|
||||
in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
if len(in_shape) >= 4:
|
||||
scratch.layer4_rn = nn.Conv2d(
|
||||
in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
||||
)
|
||||
|
||||
return scratch
|
||||
|
||||
|
||||
class ResidualConvUnit(nn.Module):
|
||||
"""Residual convolution module."""
|
||||
|
||||
def __init__(self, features, activation, bn):
|
||||
"""Init.
|
||||
|
||||
Args:
|
||||
features (int): number of features
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.bn = bn
|
||||
|
||||
self.groups = 1
|
||||
|
||||
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
|
||||
|
||||
self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
|
||||
|
||||
if self.bn:
|
||||
self.bn1 = nn.BatchNorm2d(features)
|
||||
self.bn2 = nn.BatchNorm2d(features)
|
||||
|
||||
self.activation = activation
|
||||
|
||||
self.skip_add = nn.quantized.FloatFunctional()
|
||||
|
||||
def forward(self, x):
|
||||
"""Forward pass.
|
||||
|
||||
Args:
|
||||
x (tensor): input
|
||||
|
||||
Returns:
|
||||
tensor: output
|
||||
"""
|
||||
|
||||
out = self.activation(x)
|
||||
out = self.conv1(out)
|
||||
if self.bn:
|
||||
out = self.bn1(out)
|
||||
|
||||
out = self.activation(out)
|
||||
out = self.conv2(out)
|
||||
if self.bn:
|
||||
out = self.bn2(out)
|
||||
|
||||
if self.groups > 1:
|
||||
out = self.conv_merge(out)
|
||||
|
||||
return self.skip_add.add(out, x)
|
||||
|
||||
|
||||
class FeatureFusionBlock(nn.Module):
|
||||
"""Feature fusion block."""
|
||||
|
||||
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
|
||||
"""Init.
|
||||
|
||||
Args:
|
||||
features (int): number of features
|
||||
"""
|
||||
super(FeatureFusionBlock, self).__init__()
|
||||
|
||||
self.deconv = deconv
|
||||
self.align_corners = align_corners
|
||||
|
||||
self.groups = 1
|
||||
|
||||
self.expand = expand
|
||||
out_features = features
|
||||
if self.expand:
|
||||
out_features = features // 2
|
||||
|
||||
self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
|
||||
|
||||
self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
|
||||
self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
|
||||
|
||||
self.skip_add = nn.quantized.FloatFunctional()
|
||||
|
||||
self.size = size
|
||||
|
||||
def forward(self, *xs, size=None):
|
||||
"""Forward pass.
|
||||
|
||||
Returns:
|
||||
tensor: output
|
||||
"""
|
||||
output = xs[0]
|
||||
|
||||
if len(xs) == 2:
|
||||
res = self.resConfUnit1(xs[1])
|
||||
output = self.skip_add.add(output, res)
|
||||
|
||||
output = self.resConfUnit2(output)
|
||||
|
||||
if (size is None) and (self.size is None):
|
||||
modifier = {"scale_factor": 2}
|
||||
elif size is None:
|
||||
modifier = {"size": self.size}
|
||||
else:
|
||||
modifier = {"size": size}
|
||||
|
||||
output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners)
|
||||
|
||||
output = self.out_conv(output)
|
||||
|
||||
return output
|
@ -1,183 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
from .blocks import FeatureFusionBlock, _make_scratch
|
||||
|
||||
torchhub_path = Path(__file__).parent.parent / "torchhub"
|
||||
|
||||
|
||||
def _make_fusion_block(features, use_bn, size=None):
|
||||
return FeatureFusionBlock(
|
||||
features,
|
||||
nn.ReLU(False),
|
||||
deconv=False,
|
||||
bn=use_bn,
|
||||
expand=False,
|
||||
align_corners=True,
|
||||
size=size,
|
||||
)
|
||||
|
||||
|
||||
class DPTHead(nn.Module):
|
||||
def __init__(self, nclass, in_channels, features, out_channels, use_bn=False, use_clstoken=False):
|
||||
super(DPTHead, self).__init__()
|
||||
|
||||
self.nclass = nclass
|
||||
self.use_clstoken = use_clstoken
|
||||
|
||||
self.projects = nn.ModuleList(
|
||||
[
|
||||
nn.Conv2d(
|
||||
in_channels=in_channels,
|
||||
out_channels=out_channel,
|
||||
kernel_size=1,
|
||||
stride=1,
|
||||
padding=0,
|
||||
)
|
||||
for out_channel in out_channels
|
||||
]
|
||||
)
|
||||
|
||||
self.resize_layers = nn.ModuleList(
|
||||
[
|
||||
nn.ConvTranspose2d(
|
||||
in_channels=out_channels[0], out_channels=out_channels[0], kernel_size=4, stride=4, padding=0
|
||||
),
|
||||
nn.ConvTranspose2d(
|
||||
in_channels=out_channels[1], out_channels=out_channels[1], kernel_size=2, stride=2, padding=0
|
||||
),
|
||||
nn.Identity(),
|
||||
nn.Conv2d(
|
||||
in_channels=out_channels[3], out_channels=out_channels[3], kernel_size=3, stride=2, padding=1
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
if use_clstoken:
|
||||
self.readout_projects = nn.ModuleList()
|
||||
for _ in range(len(self.projects)):
|
||||
self.readout_projects.append(nn.Sequential(nn.Linear(2 * in_channels, in_channels), nn.GELU()))
|
||||
|
||||
self.scratch = _make_scratch(
|
||||
out_channels,
|
||||
features,
|
||||
groups=1,
|
||||
expand=False,
|
||||
)
|
||||
|
||||
self.scratch.stem_transpose = None
|
||||
|
||||
self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
|
||||
self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
|
||||
self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
|
||||
self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
|
||||
|
||||
head_features_1 = features
|
||||
head_features_2 = 32
|
||||
|
||||
if nclass > 1:
|
||||
self.scratch.output_conv = nn.Sequential(
|
||||
nn.Conv2d(head_features_1, head_features_1, kernel_size=3, stride=1, padding=1),
|
||||
nn.ReLU(True),
|
||||
nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0),
|
||||
)
|
||||
else:
|
||||
self.scratch.output_conv1 = nn.Conv2d(
|
||||
head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
self.scratch.output_conv2 = nn.Sequential(
|
||||
nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
|
||||
nn.ReLU(True),
|
||||
nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
|
||||
nn.ReLU(True),
|
||||
nn.Identity(),
|
||||
)
|
||||
|
||||
def forward(self, out_features, patch_h, patch_w):
|
||||
out = []
|
||||
for i, x in enumerate(out_features):
|
||||
if self.use_clstoken:
|
||||
x, cls_token = x[0], x[1]
|
||||
readout = cls_token.unsqueeze(1).expand_as(x)
|
||||
x = self.readout_projects[i](torch.cat((x, readout), -1))
|
||||
else:
|
||||
x = x[0]
|
||||
|
||||
x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))
|
||||
|
||||
x = self.projects[i](x)
|
||||
x = self.resize_layers[i](x)
|
||||
|
||||
out.append(x)
|
||||
|
||||
layer_1, layer_2, layer_3, layer_4 = out
|
||||
|
||||
layer_1_rn = self.scratch.layer1_rn(layer_1)
|
||||
layer_2_rn = self.scratch.layer2_rn(layer_2)
|
||||
layer_3_rn = self.scratch.layer3_rn(layer_3)
|
||||
layer_4_rn = self.scratch.layer4_rn(layer_4)
|
||||
|
||||
path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
|
||||
path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
|
||||
path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
|
||||
path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
|
||||
|
||||
out = self.scratch.output_conv1(path_1)
|
||||
out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
|
||||
out = self.scratch.output_conv2(out)
|
||||
|
||||
return out
|
||||
|
||||
|
||||
class DPT_DINOv2(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
features,
|
||||
out_channels,
|
||||
encoder="vitl",
|
||||
use_bn=False,
|
||||
use_clstoken=False,
|
||||
):
|
||||
super(DPT_DINOv2, self).__init__()
|
||||
|
||||
assert encoder in ["vits", "vitb", "vitl"]
|
||||
|
||||
# # in case the Internet connection is not stable, please load the DINOv2 locally
|
||||
# if use_local:
|
||||
# self.pretrained = torch.hub.load(
|
||||
# torchhub_path / "facebookresearch_dinov2_main",
|
||||
# "dinov2_{:}14".format(encoder),
|
||||
# source="local",
|
||||
# pretrained=False,
|
||||
# )
|
||||
# else:
|
||||
# self.pretrained = torch.hub.load(
|
||||
# "facebookresearch/dinov2",
|
||||
# "dinov2_{:}14".format(encoder),
|
||||
# )
|
||||
|
||||
self.pretrained = torch.hub.load(
|
||||
"facebookresearch/dinov2",
|
||||
"dinov2_{:}14".format(encoder),
|
||||
)
|
||||
|
||||
dim = self.pretrained.blocks[0].attn.qkv.in_features
|
||||
|
||||
self.depth_head = DPTHead(1, dim, features, out_channels=out_channels, use_bn=use_bn, use_clstoken=use_clstoken)
|
||||
|
||||
def forward(self, x):
|
||||
h, w = x.shape[-2:]
|
||||
|
||||
features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True)
|
||||
|
||||
patch_h, patch_w = h // 14, w // 14
|
||||
|
||||
depth = self.depth_head(features, patch_h, patch_w)
|
||||
depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True)
|
||||
depth = F.relu(depth)
|
||||
|
||||
return depth.squeeze(1)
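Similarly, a rough sketch of the shape contract for the removed `DPT_DINOv2` wrapper; it assumes torch.hub can fetch the DINOv2 backbone, and the input height and width must be multiples of the 14-pixel patch size.

```python
import torch

model = DPT_DINOv2(encoder="vits", features=64, out_channels=[48, 96, 192, 384])
model.eval()

x = torch.randn(1, 3, 518, 518)  # 518 = 37 * 14, matching the Resize transform above
with torch.no_grad():
    depth = model(x)

print(depth.shape)  # torch.Size([1, 518, 518]) -- one relative-depth value per pixel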
|
@ -1,227 +0,0 @@
|
||||
import math
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
|
||||
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
|
||||
|
||||
Args:
|
||||
sample (dict): sample
|
||||
size (tuple): image size
|
||||
|
||||
Returns:
|
||||
tuple: new size
|
||||
"""
|
||||
shape = list(sample["disparity"].shape)
|
||||
|
||||
if shape[0] >= size[0] and shape[1] >= size[1]:
|
||||
return sample
|
||||
|
||||
scale = [0, 0]
|
||||
scale[0] = size[0] / shape[0]
|
||||
scale[1] = size[1] / shape[1]
|
||||
|
||||
scale = max(scale)
|
||||
|
||||
shape[0] = math.ceil(scale * shape[0])
|
||||
shape[1] = math.ceil(scale * shape[1])
|
||||
|
||||
# resize
|
||||
sample["image"] = cv2.resize(sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method)
|
||||
|
||||
sample["disparity"] = cv2.resize(sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST)
|
||||
sample["mask"] = cv2.resize(
|
||||
sample["mask"].astype(np.float32),
|
||||
tuple(shape[::-1]),
|
||||
interpolation=cv2.INTER_NEAREST,
|
||||
)
|
||||
sample["mask"] = sample["mask"].astype(bool)
|
||||
|
||||
return tuple(shape)
|
||||
|
||||
|
||||
class Resize(object):
|
||||
"""Resize sample to given size (width, height)."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
width,
|
||||
height,
|
||||
resize_target=True,
|
||||
keep_aspect_ratio=False,
|
||||
ensure_multiple_of=1,
|
||||
resize_method="lower_bound",
|
||||
image_interpolation_method=cv2.INTER_AREA,
|
||||
):
|
||||
"""Init.
|
||||
|
||||
Args:
|
||||
width (int): desired output width
|
||||
height (int): desired output height
|
||||
resize_target (bool, optional):
|
||||
True: Resize the full sample (image, mask, target).
|
||||
False: Resize image only.
|
||||
Defaults to True.
|
||||
keep_aspect_ratio (bool, optional):
|
||||
True: Keep the aspect ratio of the input sample.
|
||||
Output sample might not have the given width and height, and
|
||||
resize behaviour depends on the parameter 'resize_method'.
|
||||
Defaults to False.
|
||||
ensure_multiple_of (int, optional):
|
||||
Output width and height is constrained to be multiple of this parameter.
|
||||
Defaults to 1.
|
||||
resize_method (str, optional):
|
||||
"lower_bound": Output will be at least as large as the given size.
|
||||
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller
|
||||
than given size.)
|
||||
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
|
||||
Defaults to "lower_bound".
|
||||
"""
|
||||
self.__width = width
|
||||
self.__height = height
|
||||
|
||||
self.__resize_target = resize_target
|
||||
self.__keep_aspect_ratio = keep_aspect_ratio
|
||||
self.__multiple_of = ensure_multiple_of
|
||||
self.__resize_method = resize_method
|
||||
self.__image_interpolation_method = image_interpolation_method
|
||||
|
||||
def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
|
||||
y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||
|
||||
if max_val is not None and y > max_val:
|
||||
y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||
|
||||
if y < min_val:
|
||||
y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
|
||||
|
||||
return y
|
||||
|
||||
def get_size(self, width, height):
|
||||
# determine new height and width
|
||||
scale_height = self.__height / height
|
||||
scale_width = self.__width / width
|
||||
|
||||
if self.__keep_aspect_ratio:
|
||||
if self.__resize_method == "lower_bound":
|
||||
# scale such that output size is lower bound
|
||||
if scale_width > scale_height:
|
||||
# fit width
|
||||
scale_height = scale_width
|
||||
else:
|
||||
# fit height
|
||||
scale_width = scale_height
|
||||
elif self.__resize_method == "upper_bound":
|
||||
# scale such that output size is upper bound
|
||||
if scale_width < scale_height:
|
||||
# fit width
|
||||
scale_height = scale_width
|
||||
else:
|
||||
# fit height
|
||||
scale_width = scale_height
|
||||
elif self.__resize_method == "minimal":
|
||||
# scale as little as possible
|
||||
if abs(1 - scale_width) < abs(1 - scale_height):
|
||||
# fit width
|
||||
scale_height = scale_width
|
||||
else:
|
||||
# fit height
|
||||
scale_width = scale_height
|
||||
else:
|
||||
raise ValueError(f"resize_method {self.__resize_method} not implemented")
|
||||
|
||||
if self.__resize_method == "lower_bound":
|
||||
new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
|
||||
new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
|
||||
elif self.__resize_method == "upper_bound":
|
||||
new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
|
||||
new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
|
||||
elif self.__resize_method == "minimal":
|
||||
new_height = self.constrain_to_multiple_of(scale_height * height)
|
||||
new_width = self.constrain_to_multiple_of(scale_width * width)
|
||||
else:
|
||||
raise ValueError(f"resize_method {self.__resize_method} not implemented")
|
||||
|
||||
return (new_width, new_height)
|
||||
|
||||
def __call__(self, sample):
|
||||
width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])
|
||||
|
||||
# resize sample
|
||||
sample["image"] = cv2.resize(
|
||||
sample["image"],
|
||||
(width, height),
|
||||
interpolation=self.__image_interpolation_method,
|
||||
)
|
||||
|
||||
if self.__resize_target:
|
||||
if "disparity" in sample:
|
||||
sample["disparity"] = cv2.resize(
|
||||
sample["disparity"],
|
||||
(width, height),
|
||||
interpolation=cv2.INTER_NEAREST,
|
||||
)
|
||||
|
||||
if "depth" in sample:
|
||||
sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)
|
||||
|
||||
if "semseg_mask" in sample:
|
||||
# sample["semseg_mask"] = cv2.resize(
|
||||
# sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
|
||||
# )
|
||||
sample["semseg_mask"] = F.interpolate(
|
||||
torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode="nearest"
|
||||
).numpy()[0, 0]
|
||||
|
||||
if "mask" in sample:
|
||||
sample["mask"] = cv2.resize(
|
||||
sample["mask"].astype(np.float32),
|
||||
(width, height),
|
||||
interpolation=cv2.INTER_NEAREST,
|
||||
)
|
||||
# sample["mask"] = sample["mask"].astype(bool)
|
||||
|
||||
# print(sample['image'].shape, sample['depth'].shape)
|
||||
return sample
|
||||
|
||||
|
||||
class NormalizeImage(object):
|
||||
"""Normlize image by given mean and std."""
|
||||
|
||||
def __init__(self, mean, std):
|
||||
self.__mean = mean
|
||||
self.__std = std
|
||||
|
||||
def __call__(self, sample):
|
||||
sample["image"] = (sample["image"] - self.__mean) / self.__std
|
||||
|
||||
return sample
|
||||
|
||||
|
||||
class PrepareForNet(object):
|
||||
"""Prepare sample for usage as network input."""
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def __call__(self, sample):
|
||||
image = np.transpose(sample["image"], (2, 0, 1))
|
||||
sample["image"] = np.ascontiguousarray(image).astype(np.float32)
|
||||
|
||||
if "mask" in sample:
|
||||
sample["mask"] = sample["mask"].astype(np.float32)
|
||||
sample["mask"] = np.ascontiguousarray(sample["mask"])
|
||||
|
||||
if "depth" in sample:
|
||||
depth = sample["depth"].astype(np.float32)
|
||||
sample["depth"] = np.ascontiguousarray(depth)
|
||||
|
||||
if "semseg_mask" in sample:
|
||||
sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
|
||||
sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])
|
||||
|
||||
return sample
|
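Finally, an illustrative check of how the removed `Resize` transform picks output dimensions under the settings used for Depth Anything above (lower_bound, keep_aspect_ratio, multiples of 14); the input size is made up.

```python
# Sketch only: get_size() takes (width, height) and returns (new_width, new_height).
resize = Resize(
    width=518,
    height=518,
    resize_target=False,
    keep_aspect_ratio=True,
    ensure_multiple_of=14,
    resize_method="lower_bound",
)

resize.get_size(640, 480)
# -> (686, 518): the 518/480 scale is applied to both sides, then each side is
#    rounded to a multiple of 14 while staying at or above the requested minimum.
```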