Compare commits
326 Commits
feat/diffu
...
merged-cha
Author | SHA1 | Date | |
---|---|---|---|
62d0b8c42b | |||
1869f34fba | |||
e225cf0613 | |||
3c79476785 | |||
dad364da17 | |||
37bc4f78d0 | |||
de0b43c81d | |||
ea1d2d6a4c | |||
fafe8ccc59 | |||
4b88cfac19 | |||
5fa13fba36 | |||
f28f761436 | |||
27d7889780 | |||
a1cf153097 | |||
d121eefa12 | |||
c92e25a6a7 | |||
8be03dead5 | |||
1197133d06 | |||
850458a554 | |||
e96ad41729 | |||
53cf518390 | |||
b00ace852d | |||
be72765d02 | |||
580d29257c | |||
5d068c1da1 | |||
8e2ccab1f0 | |||
6f478eef62 | |||
1ff1c370df | |||
5ef87ef2a6 | |||
d0709d4f4e | |||
2a081b0a27 | |||
d902533387 | |||
1174713223 | |||
4b1740ad19 | |||
e03c88ce32 | |||
b917ffecbe | |||
2967a78c5a | |||
aa25ea62a5 | |||
1ab0e86085 | |||
c9ddbb4241 | |||
415a1c7a4f | |||
84a4836ab7 | |||
dbd6c9c6ed | |||
4f95c077d4 | |||
0a4cbc4e16 | |||
d45b76fab4 | |||
9722135cda | |||
7366913a31 | |||
bd31b5606c | |||
2953dea4a0 | |||
f3fed0b10f | |||
db57d426d9 | |||
4536e4a8b6 | |||
426a7b900f | |||
cc571d9ab2 | |||
296c861e7d | |||
aa45d21fd2 | |||
ac42513da9 | |||
e2387546fe | |||
c8929b35f0 | |||
c000e270a0 | |||
8ff28da3b4 | |||
b7b376103c | |||
08d379bb29 | |||
74e644c4ba | |||
d4c36da3ee | |||
dfe0b73890 | |||
c0c8fa9a89 | |||
ad7139829c | |||
a24e63d440 | |||
59437a02c3 | |||
98a44d7fa1 | |||
07416753be | |||
630854ce26 | |||
b55c2b99a7 | |||
f81d36c95f | |||
26b7aadd32 | |||
8e7e3c2b4a | |||
f2e8b66be4 | |||
ff09fd30dc | |||
9fcc30c3d6 | |||
b29a6522ef | |||
936d19cd60 | |||
f25b6ee5d1 | |||
7dea079220 | |||
7fc08962fb | |||
71155d9e72 | |||
6ccd72349d | |||
30e12376d3 | |||
23c8a893e1 | |||
7d93329401 | |||
968fb655a4 | |||
80ec9f4131 | |||
f19def5f7b | |||
9e1dd8ac9c | |||
ebd68b7a6c | |||
68a231afea | |||
21ab650ac0 | |||
b501bd709f | |||
4082f25062 | |||
63d74b4ba6 | |||
da5907613b | |||
3a9201bd31 | |||
d6e2cb7cef | |||
0809e832d4 | |||
7269c9f02e | |||
d86d7e5c33 | |||
5d87578746 | |||
04aef021fc | |||
0fc08bb384 | |||
5779542084 | |||
ebda81e96e | |||
3fe332e85f | |||
3428ea1b3c | |||
6024fc7baf | |||
75c1c4ce5a | |||
ffa05a0bb3 | |||
a20e17330b | |||
4e83644433 | |||
604f0083f2 | |||
2a8a158823 | |||
f8c3db72e9 | |||
60815807f9 | |||
196fb0e014 | |||
eba668956d | |||
ee5ec023f4 | |||
d59661e0af | |||
f51e8eeae1 | |||
6e06935e75 | |||
f7f697849c | |||
8e17e29a5c | |||
12e9f17f7a | |||
cb7e56a9a3 | |||
1a710a4c12 | |||
d8d266d3be | |||
4716632c23 | |||
3c4150d153 | |||
b71b14d582 | |||
73481d4aec | |||
2c049a3b94 | |||
367de44a8b | |||
f5f378d04b | |||
823edbfdef | |||
29bbb27289 | |||
a23502f7ff | |||
ce64dbefce | |||
b47afdc3b5 | |||
cde9c3090f | |||
6924b04d7c | |||
83fbd4bdf2 | |||
6460dcc7e0 | |||
59aa009c93 | |||
59d2a012cd | |||
7e3b620830 | |||
e16b55816f | |||
895cb8637e | |||
fe5bceb1ed | |||
5d475a40f5 | |||
bca7ea1674 | |||
f27bb402fb | |||
dd32c632cd | |||
9e2e740033 | |||
d6362ce0bd | |||
2347a00a70 | |||
0b7dc721cf | |||
ac04a834ef | |||
bbca053b48 | |||
fcf2006502 | |||
ac0d0019bd | |||
2d922a0a65 | |||
8db14911d7 | |||
01bab58b20 | |||
7a57bc99cf | |||
d3b6d86e74 | |||
360b6cb286 | |||
8f9e9e639e | |||
6930d8ba41 | |||
7ad74e680d | |||
c56a6a4ddd | |||
afad764a00 | |||
49a72bd714 | |||
8cf14287b6 | |||
0db47dd5e7 | |||
71f6f77ae8 | |||
6f16229c41 | |||
0cc0d794d1 | |||
535639cb95 | |||
2250bca8d9 | |||
4ce39a5974 | |||
644e9287f0 | |||
6a5e0be022 | |||
707f0f7091 | |||
8e709fe05a | |||
154da609cb | |||
21975d6268 | |||
31035b3e63 | |||
6c05818887 | |||
77c5b051f0 | |||
4fdc4c15f9 | |||
1a4be78013 | |||
eb16ad3d6f | |||
1fee08639d | |||
7caaf40835 | |||
6bfe994622 | |||
8a6f03cd46 | |||
4ce9f9dc36 | |||
00297716d6 | |||
50c0dc71eb | |||
29ccc6a3d8 | |||
f92a5cbabc | |||
acbf10f7ba | |||
46d830b9fa | |||
db17ec7a4b | |||
6320d18846 | |||
37c8b9d06a | |||
7ba2108eb0 | |||
8aeeee4752 | |||
930de51910 | |||
b1b5c0d3b2 | |||
ebe717099e | |||
06245bc761 | |||
b4c0dafdc8 | |||
0cefacb3a2 | |||
baa5f75976 | |||
989aaedc7f | |||
93e08df849 | |||
4a43e1c1b8 | |||
2bbab9d94e | |||
a456f6e6f0 | |||
a408f562d6 | |||
cefdf9ed00 | |||
5413bf07e2 | |||
4cffe282bd | |||
ae8ffe9d51 | |||
870cc5b733 | |||
0b4eb888c5 | |||
11f1cb5391 | |||
1e2e26cfc2 | |||
e9bce6e1c3 | |||
799ef0e7c1 | |||
61c10a7ca8 | |||
93880223e6 | |||
271456b745 | |||
cecee33bc0 | |||
4f43eda09b | |||
011757c497 | |||
2700d0e769 | |||
d256d93a2a | |||
f3c8e986a5 | |||
48f5e4f313 | |||
5950ffe064 | |||
49ca949cd6 | |||
5d69f1cbf5 | |||
9169006171 | |||
28b74523d0 | |||
9359c03c3c | |||
598241e0f2 | |||
e698a8006c | |||
34e7b5a7fb | |||
5c3dd62ae0 | |||
7e2eeec1f3 | |||
7eb79266c4 | |||
5d4610d981 | |||
7c548c5bf3 | |||
2a38606342 | |||
793cf39964 | |||
ab3e689ee0 | |||
20f497054f | |||
6209fef63d | |||
5168415999 | |||
b490c8ae27 | |||
6f354f16ba | |||
e108a2302e | |||
2ffecef792 | |||
2663a07e94 | |||
8d2ef5afc3 | |||
539887b215 | |||
2ba505cce9 | |||
bd92a31d15 | |||
ee2529f3fd | |||
89b7082bc0 | |||
55dfabb892 | |||
2a41fd0b29 | |||
966919ea4a | |||
d3acdcf12f | |||
52f9749bf5 | |||
2a661450c3 | |||
2d96c62fdb | |||
3e6173ee8c | |||
4e9841c924 | |||
f4ea495d23 | |||
43a4b815e8 | |||
4134f18319 | |||
cd292f6c1c | |||
3ce8f3d6fe | |||
10fd4f6a61 | |||
47b1fd4bce | |||
300805a25a | |||
56527da73e | |||
ca4b8e65c1 | |||
f5194f9e2d | |||
ccbbb417f9 | |||
37786a26a5 | |||
4f2930412e | |||
83049a3a5b | |||
38256f97b3 | |||
77f2aabda4 | |||
e32eb2a649 | |||
f4cdfa3b9c | |||
e99b715e9e | |||
ed96c40239 | |||
1b3bb932b9 | |||
f0b102d830 | |||
a47d91f0e7 | |||
358c1f5791 | |||
faec320d48 | |||
fd074abdc4 | |||
d8eb58cd58 | |||
8937d66412 | |||
a6935ae7fb | |||
69968eb67b | |||
e57f5f129c | |||
1b8651fa26 | |||
f6664960ca | |||
84a001720c | |||
c9951cd86b |
8
.github/CODEOWNERS
vendored
@ -1,5 +1,5 @@
|
|||||||
# continuous integration
|
# continuous integration
|
||||||
/.github/workflows/ @lstein @blessedcoolant @hipsterusername
|
/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr
|
||||||
|
|
||||||
# documentation
|
# documentation
|
||||||
/docs/ @lstein @blessedcoolant @hipsterusername @Millu
|
/docs/ @lstein @blessedcoolant @hipsterusername @Millu
|
||||||
@ -10,7 +10,7 @@
|
|||||||
|
|
||||||
# installation and configuration
|
# installation and configuration
|
||||||
/pyproject.toml @lstein @blessedcoolant @hipsterusername
|
/pyproject.toml @lstein @blessedcoolant @hipsterusername
|
||||||
/docker/ @lstein @blessedcoolant @hipsterusername
|
/docker/ @lstein @blessedcoolant @hipsterusername @ebr
|
||||||
/scripts/ @ebr @lstein @hipsterusername
|
/scripts/ @ebr @lstein @hipsterusername
|
||||||
/installer/ @lstein @ebr @hipsterusername
|
/installer/ @lstein @ebr @hipsterusername
|
||||||
/invokeai/assets @lstein @ebr @hipsterusername
|
/invokeai/assets @lstein @ebr @hipsterusername
|
||||||
@ -26,9 +26,7 @@
|
|||||||
|
|
||||||
# front ends
|
# front ends
|
||||||
/invokeai/frontend/CLI @lstein @hipsterusername
|
/invokeai/frontend/CLI @lstein @hipsterusername
|
||||||
/invokeai/frontend/install @lstein @ebr @hipsterusername
|
/invokeai/frontend/install @lstein @ebr @hipsterusername
|
||||||
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
|
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
|
||||||
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
|
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
|
||||||
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
|
/invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
|
||||||
|
|
||||||
|
|
||||||
|
5
.github/workflows/build-container.yml
vendored
@ -40,10 +40,14 @@ jobs:
|
|||||||
- name: Free up more disk space on the runner
|
- name: Free up more disk space on the runner
|
||||||
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
|
# https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
|
||||||
run: |
|
run: |
|
||||||
|
echo "----- Free space before cleanup"
|
||||||
|
df -h
|
||||||
sudo rm -rf /usr/share/dotnet
|
sudo rm -rf /usr/share/dotnet
|
||||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||||
sudo swapoff /mnt/swapfile
|
sudo swapoff /mnt/swapfile
|
||||||
sudo rm -rf /mnt/swapfile
|
sudo rm -rf /mnt/swapfile
|
||||||
|
echo "----- Free space after cleanup"
|
||||||
|
df -h
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@ -91,6 +95,7 @@ jobs:
|
|||||||
# password: ${{ secrets.DOCKERHUB_TOKEN }}
|
# password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build container
|
- name: Build container
|
||||||
|
timeout-minutes: 40
|
||||||
id: docker_build
|
id: docker_build
|
||||||
uses: docker/build-push-action@v4
|
uses: docker/build-push-action@v4
|
||||||
with:
|
with:
|
||||||
|
29
.github/workflows/change-monitor.yml
vendored
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
name: Trigger Target Workflow
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
trigger:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Trigger Workflow in Another Repository
|
||||||
|
run: |
|
||||||
|
# Set the required variables
|
||||||
|
repo_owner="invoke-ai"
|
||||||
|
repo_name="Invoke"
|
||||||
|
event_type="invokeai-pr-merge"
|
||||||
|
service=${{ github.event.inputs.target_service }}"
|
||||||
|
version="${{ github.event.inputs.target_version }}"
|
||||||
|
|
||||||
|
curl -L \
|
||||||
|
-X POST \
|
||||||
|
-H "Accept: application/vnd.github+json" \
|
||||||
|
-H "Authorization: Bearer ${{ secrets.PAT }}" \
|
||||||
|
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||||
|
https://api.github.com/repos/$repo_owner/$repo_name/dispatches \
|
||||||
|
-d "{\"event_type\": \"$event_type\", \"client_payload\": {\"service\": \"$service\", \"version\": \"$version\", \"unit\": false, \"integration\": true}}"
|
2
.github/workflows/test-invoke-pip.yml
vendored
@ -58,7 +58,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Check for changed python files
|
- name: Check for changed python files
|
||||||
id: changed-files
|
id: changed-files
|
||||||
uses: tj-actions/changed-files@v37
|
uses: tj-actions/changed-files@v41
|
||||||
with:
|
with:
|
||||||
files_yaml: |
|
files_yaml: |
|
||||||
python:
|
python:
|
||||||
|
12
README.md
@ -1,10 +1,10 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
# Invoke AI - Generative AI for Professional Creatives
|
# Invoke - Professional Creative AI Tools for Visual Media
|
||||||
## Professional Creative Tools for Stable Diffusion, Custom-Trained Models, and more.
|
## To learn more about Invoke, or implement our Business solutions, visit [invoke.com](https://www.invoke.com/about)
|
||||||
To learn more about Invoke AI, get started instantly, or implement our Business solutions, visit [invoke.ai](https://invoke.ai)
|
|
||||||
|
|
||||||
|
|
||||||
[![discord badge]][discord link]
|
[![discord badge]][discord link]
|
||||||
@ -56,7 +56,9 @@ the foundation for multiple commercial products.
|
|||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
@ -2,10 +2,13 @@
|
|||||||
## Any environment variables supported by InvokeAI can be specified here,
|
## Any environment variables supported by InvokeAI can be specified here,
|
||||||
## in addition to the examples below.
|
## in addition to the examples below.
|
||||||
|
|
||||||
# INVOKEAI_ROOT is the path to a path on the local filesystem where InvokeAI will store data.
|
# HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where InvokeAI will store data.
|
||||||
# Outputs will also be stored here by default.
|
# Outputs will also be stored here by default.
|
||||||
# This **must** be an absolute path.
|
# If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
|
||||||
INVOKEAI_ROOT=
|
#HOST_INVOKEAI_ROOT=../../invokeai-data
|
||||||
|
|
||||||
|
# INVOKEAI_ROOT is the path to the root of the InvokeAI repository within the container.
|
||||||
|
# INVOKEAI_ROOT=~/invokeai
|
||||||
|
|
||||||
# Get this value from your HuggingFace account settings page.
|
# Get this value from your HuggingFace account settings page.
|
||||||
# HUGGING_FACE_HUB_TOKEN=
|
# HUGGING_FACE_HUB_TOKEN=
|
||||||
|
@ -59,7 +59,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
|
|||||||
|
|
||||||
# #### Build the Web UI ------------------------------------
|
# #### Build the Web UI ------------------------------------
|
||||||
|
|
||||||
FROM node:18-slim AS web-builder
|
FROM node:20-slim AS web-builder
|
||||||
ENV PNPM_HOME="/pnpm"
|
ENV PNPM_HOME="/pnpm"
|
||||||
ENV PATH="$PNPM_HOME:$PATH"
|
ENV PATH="$PNPM_HOME:$PATH"
|
||||||
RUN corepack enable
|
RUN corepack enable
|
||||||
@ -68,7 +68,7 @@ WORKDIR /build
|
|||||||
COPY invokeai/frontend/web/ ./
|
COPY invokeai/frontend/web/ ./
|
||||||
RUN --mount=type=cache,target=/pnpm/store \
|
RUN --mount=type=cache,target=/pnpm/store \
|
||||||
pnpm install --frozen-lockfile
|
pnpm install --frozen-lockfile
|
||||||
RUN pnpm run build
|
RUN npx vite build
|
||||||
|
|
||||||
#### Runtime stage ---------------------------------------
|
#### Runtime stage ---------------------------------------
|
||||||
|
|
||||||
|
@ -21,7 +21,9 @@ x-invokeai: &invokeai
|
|||||||
ports:
|
ports:
|
||||||
- "${INVOKEAI_PORT:-9090}:9090"
|
- "${INVOKEAI_PORT:-9090}:9090"
|
||||||
volumes:
|
volumes:
|
||||||
- ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
|
- type: bind
|
||||||
|
source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
|
||||||
|
target: ${INVOKEAI_ROOT:-/invokeai}
|
||||||
- ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
|
- ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
|
||||||
# - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
|
# - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
|
||||||
# - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
|
# - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
|
||||||
|
Before Width: | Height: | Size: 297 KiB After Width: | Height: | Size: 46 KiB |
Before Width: | Height: | Size: 1.1 MiB After Width: | Height: | Size: 4.9 MiB |
Before Width: | Height: | Size: 169 KiB After Width: | Height: | Size: 1.1 MiB |
Before Width: | Height: | Size: 194 KiB After Width: | Height: | Size: 131 KiB |
Before Width: | Height: | Size: 209 KiB After Width: | Height: | Size: 122 KiB |
Before Width: | Height: | Size: 114 KiB After Width: | Height: | Size: 95 KiB |
Before Width: | Height: | Size: 187 KiB After Width: | Height: | Size: 123 KiB |
Before Width: | Height: | Size: 112 KiB After Width: | Height: | Size: 107 KiB |
Before Width: | Height: | Size: 132 KiB After Width: | Height: | Size: 61 KiB |
Before Width: | Height: | Size: 167 KiB After Width: | Height: | Size: 119 KiB |
Before Width: | Height: | Size: 70 KiB |
Before Width: | Height: | Size: 59 KiB After Width: | Height: | Size: 60 KiB |
BIN
docs/assets/nodes/workflow_library.png
Normal file
After Width: | Height: | Size: 129 KiB |
@ -15,8 +15,13 @@ model. These are the:
|
|||||||
their metadata, and `ModelRecordServiceBase` to store that
|
their metadata, and `ModelRecordServiceBase` to store that
|
||||||
information. It is also responsible for managing the InvokeAI
|
information. It is also responsible for managing the InvokeAI
|
||||||
`models` directory and its contents.
|
`models` directory and its contents.
|
||||||
|
|
||||||
* _DownloadQueueServiceBase_ (**CURRENTLY UNDER DEVELOPMENT - NOT IMPLEMENTED**)
|
* _ModelMetadataStore_ and _ModelMetaDataFetch_ Backend modules that
|
||||||
|
are able to retrieve metadata from online model repositories,
|
||||||
|
transform them into Pydantic models, and cache them to the InvokeAI
|
||||||
|
SQL database.
|
||||||
|
|
||||||
|
* _DownloadQueueServiceBase_
|
||||||
A multithreaded downloader responsible
|
A multithreaded downloader responsible
|
||||||
for downloading models from a remote source to disk. The download
|
for downloading models from a remote source to disk. The download
|
||||||
queue has special methods for downloading repo_id folders from
|
queue has special methods for downloading repo_id folders from
|
||||||
@ -30,13 +35,13 @@ model. These are the:
|
|||||||
|
|
||||||
## Location of the Code
|
## Location of the Code
|
||||||
|
|
||||||
All four of these services can be found in
|
The four main services can be found in
|
||||||
`invokeai/app/services` in the following directories:
|
`invokeai/app/services` in the following directories:
|
||||||
|
|
||||||
* `invokeai/app/services/model_records/`
|
* `invokeai/app/services/model_records/`
|
||||||
* `invokeai/app/services/model_install/`
|
* `invokeai/app/services/model_install/`
|
||||||
|
* `invokeai/app/services/downloads/`
|
||||||
* `invokeai/app/services/model_loader/` (**under development**)
|
* `invokeai/app/services/model_loader/` (**under development**)
|
||||||
* `invokeai/app/services/downloads/`(**under development**)
|
|
||||||
|
|
||||||
Code related to the FastAPI web API can be found in
|
Code related to the FastAPI web API can be found in
|
||||||
`invokeai/app/api/routers/model_records.py`.
|
`invokeai/app/api/routers/model_records.py`.
|
||||||
@ -402,15 +407,18 @@ functionality:
|
|||||||
the download, installation and registration process.
|
the download, installation and registration process.
|
||||||
|
|
||||||
- Downloading a model from an arbitrary URL and installing it in
|
- Downloading a model from an arbitrary URL and installing it in
|
||||||
`models_dir` (_implementation pending_).
|
`models_dir`.
|
||||||
|
|
||||||
- Special handling for Civitai model URLs which allow the user to
|
- Special handling for Civitai model URLs which allow the user to
|
||||||
paste in a model page's URL or download link (_implementation pending_).
|
paste in a model page's URL or download link
|
||||||
|
|
||||||
|
|
||||||
- Special handling for HuggingFace repo_ids to recursively download
|
- Special handling for HuggingFace repo_ids to recursively download
|
||||||
the contents of the repository, paying attention to alternative
|
the contents of the repository, paying attention to alternative
|
||||||
variants such as fp16. (_implementation pending_)
|
variants such as fp16.
|
||||||
|
|
||||||
|
- Saving tags and other metadata about the model into the invokeai database
|
||||||
|
when fetching from a repo that provides that type of information,
|
||||||
|
(currently only Civitai and HuggingFace).
|
||||||
|
|
||||||
### Initializing the installer
|
### Initializing the installer
|
||||||
|
|
||||||
@ -426,16 +434,24 @@ following initialization pattern:
|
|||||||
from invokeai.app.services.config import InvokeAIAppConfig
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
from invokeai.app.services.model_records import ModelRecordServiceSQL
|
from invokeai.app.services.model_records import ModelRecordServiceSQL
|
||||||
from invokeai.app.services.model_install import ModelInstallService
|
from invokeai.app.services.model_install import ModelInstallService
|
||||||
|
from invokeai.app.services.download import DownloadQueueService
|
||||||
from invokeai.app.services.shared.sqlite import SqliteDatabase
|
from invokeai.app.services.shared.sqlite import SqliteDatabase
|
||||||
from invokeai.backend.util.logging import InvokeAILogger
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
|
|
||||||
config = InvokeAIAppConfig.get_config()
|
config = InvokeAIAppConfig.get_config()
|
||||||
config.parse_args()
|
config.parse_args()
|
||||||
|
|
||||||
logger = InvokeAILogger.get_logger(config=config)
|
logger = InvokeAILogger.get_logger(config=config)
|
||||||
db = SqliteDatabase(config, logger)
|
db = SqliteDatabase(config, logger)
|
||||||
|
record_store = ModelRecordServiceSQL(db)
|
||||||
|
queue = DownloadQueueService()
|
||||||
|
queue.start()
|
||||||
|
|
||||||
store = ModelRecordServiceSQL(db)
|
installer = ModelInstallService(app_config=config,
|
||||||
installer = ModelInstallService(config, store)
|
record_store=record_store,
|
||||||
|
download_queue=queue
|
||||||
|
)
|
||||||
|
installer.start()
|
||||||
```
|
```
|
||||||
|
|
||||||
The full form of `ModelInstallService()` takes the following
|
The full form of `ModelInstallService()` takes the following
|
||||||
@ -443,9 +459,12 @@ required parameters:
|
|||||||
|
|
||||||
| **Argument** | **Type** | **Description** |
|
| **Argument** | **Type** | **Description** |
|
||||||
|------------------|------------------------------|------------------------------|
|
|------------------|------------------------------|------------------------------|
|
||||||
| `config` | InvokeAIAppConfig | InvokeAI app configuration object |
|
| `app_config` | InvokeAIAppConfig | InvokeAI app configuration object |
|
||||||
| `record_store` | ModelRecordServiceBase | Config record storage database |
|
| `record_store` | ModelRecordServiceBase | Config record storage database |
|
||||||
| `event_bus` | EventServiceBase | Optional event bus to send download/install progress events to |
|
| `download_queue` | DownloadQueueServiceBase | Download queue object |
|
||||||
|
| `metadata_store` | Optional[ModelMetadataStore] | Metadata storage object |
|
||||||
|
|`session` | Optional[requests.Session] | Swap in a different Session object (usually for debugging) |
|
||||||
|
|
||||||
|
|
||||||
Once initialized, the installer will provide the following methods:
|
Once initialized, the installer will provide the following methods:
|
||||||
|
|
||||||
@ -474,14 +493,14 @@ source7 = URLModelSource(url='https://civitai.com/api/download/models/63006', ac
|
|||||||
for source in [source1, source2, source3, source4, source5, source6, source7]:
|
for source in [source1, source2, source3, source4, source5, source6, source7]:
|
||||||
install_job = installer.install_model(source)
|
install_job = installer.install_model(source)
|
||||||
|
|
||||||
source2job = installer.wait_for_installs()
|
source2job = installer.wait_for_installs(timeout=120)
|
||||||
for source in sources:
|
for source in sources:
|
||||||
job = source2job[source]
|
job = source2job[source]
|
||||||
if job.status == "completed":
|
if job.complete:
|
||||||
model_config = job.config_out
|
model_config = job.config_out
|
||||||
model_key = model_config.key
|
model_key = model_config.key
|
||||||
print(f"{source} installed as {model_key}")
|
print(f"{source} installed as {model_key}")
|
||||||
elif job.status == "error":
|
elif job.errored:
|
||||||
print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")
|
print(f"{source}: {job.error_type}.\nStack trace:\n{job.error}")
|
||||||
|
|
||||||
```
|
```
|
||||||
@ -515,43 +534,117 @@ The full list of arguments to `import_model()` is as follows:
|
|||||||
|
|
||||||
| **Argument** | **Type** | **Default** | **Description** |
|
| **Argument** | **Type** | **Default** | **Description** |
|
||||||
|------------------|------------------------------|-------------|-------------------------------------------|
|
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||||
| `source` | Union[str, Path, AnyHttpUrl] | | The source of the model, Path, URL or repo_id |
|
| `source` | ModelSource | None | The source of the model, Path, URL or repo_id |
|
||||||
| `inplace` | bool | True | Leave a local model in its current location |
|
|
||||||
| `variant` | str | None | Desired variant, such as 'fp16' or 'onnx' (HuggingFace only) |
|
|
||||||
| `subfolder` | str | None | Repository subfolder (HuggingFace only) |
|
|
||||||
| `config` | Dict[str, Any] | None | Override all or a portion of model's probed attributes |
|
| `config` | Dict[str, Any] | None | Override all or a portion of model's probed attributes |
|
||||||
| `access_token` | str | None | Provide authorization information needed to download |
|
|
||||||
|
|
||||||
|
The next few sections describe the various types of ModelSource that
|
||||||
The `inplace` field controls how local model Paths are handled. If
|
can be passed to `import_model()`.
|
||||||
True (the default), then the model is simply registered in its current
|
|
||||||
location by the installer's `ModelConfigRecordService`. Otherwise, a
|
|
||||||
copy of the model put into the location specified by the `models_dir`
|
|
||||||
application configuration parameter.
|
|
||||||
|
|
||||||
The `variant` field is used for HuggingFace repo_ids only. If
|
|
||||||
provided, the repo_id download handler will look for and download
|
|
||||||
tensors files that follow the convention for the selected variant:
|
|
||||||
|
|
||||||
- "fp16" will select files named "*model.fp16.{safetensors,bin}"
|
|
||||||
- "onnx" will select files ending with the suffix ".onnx"
|
|
||||||
- "openvino" will select files beginning with "openvino_model"
|
|
||||||
|
|
||||||
In the special case of the "fp16" variant, the installer will select
|
|
||||||
the 32-bit version of the files if the 16-bit version is unavailable.
|
|
||||||
|
|
||||||
`subfolder` is used for HuggingFace repo_ids only. If provided, the
|
|
||||||
model will be downloaded from the designated subfolder rather than the
|
|
||||||
top-level repository folder. If a subfolder is attached to the repo_id
|
|
||||||
using the format `repo_owner/repo_name:subfolder`, then the subfolder
|
|
||||||
specified by the repo_id will override the subfolder argument.
|
|
||||||
|
|
||||||
`config` can be used to override all or a portion of the configuration
|
`config` can be used to override all or a portion of the configuration
|
||||||
attributes returned by the model prober. See the section below for
|
attributes returned by the model prober. See the section below for
|
||||||
details.
|
details.
|
||||||
|
|
||||||
`access_token` is passed to the download queue and used to access
|
|
||||||
repositories that require it.
|
#### LocalModelSource
|
||||||
|
|
||||||
|
This is used for a model that is located on a locally-accessible Posix
|
||||||
|
filesystem, such as a local disk or networked fileshare.
|
||||||
|
|
||||||
|
|
||||||
|
| **Argument** | **Type** | **Default** | **Description** |
|
||||||
|
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||||
|
| `path` | str | Path | None | Path to the model file or directory |
|
||||||
|
| `inplace` | bool | False | If set, the model file(s) will be left in their location; otherwise they will be copied into the InvokeAI root's `models` directory |
|
||||||
|
|
||||||
|
#### URLModelSource
|
||||||
|
|
||||||
|
This is used for a single-file model that is accessible via a URL. The
|
||||||
|
fields are:
|
||||||
|
|
||||||
|
| **Argument** | **Type** | **Default** | **Description** |
|
||||||
|
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||||
|
| `url` | AnyHttpUrl | None | The URL for the model file. |
|
||||||
|
| `access_token` | str | None | An access token needed to gain access to this file. |
|
||||||
|
|
||||||
|
The `AnyHttpUrl` class can be imported from `pydantic.networks`.
|
||||||
|
|
||||||
|
Ordinarily, no metadata is retrieved from these sources. However,
|
||||||
|
there is special-case code in the installer that looks for HuggingFace
|
||||||
|
and Civitai URLs and fetches the corresponding model metadata from
|
||||||
|
the corresponding repo.
|
||||||
|
|
||||||
|
#### CivitaiModelSource
|
||||||
|
|
||||||
|
This is used for a model that is hosted by the Civitai web site.
|
||||||
|
|
||||||
|
| **Argument** | **Type** | **Default** | **Description** |
|
||||||
|
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||||
|
| `version_id` | int | None | The ID of the particular version of the desired model. |
|
||||||
|
| `access_token` | str | None | An access token needed to gain access to a subscriber's-only model. |
|
||||||
|
|
||||||
|
Civitai has two model IDs, both of which are integers. The `model_id`
|
||||||
|
corresponds to a collection of model versions that may different in
|
||||||
|
arbitrary ways, such as derivation from different checkpoint training
|
||||||
|
steps, SFW vs NSFW generation, pruned vs non-pruned, etc. The
|
||||||
|
`version_id` points to a specific version. Please use the latter.
|
||||||
|
|
||||||
|
Some Civitai models require an access token to download. These can be
|
||||||
|
generated from the Civitai profile page of a logged-in
|
||||||
|
account. Somewhat annoyingly, if you fail to provide the access token
|
||||||
|
when downloading a model that needs it, Civitai generates a redirect
|
||||||
|
to a login page rather than a 403 Forbidden error. The installer
|
||||||
|
attempts to catch this event and issue an informative error
|
||||||
|
message. Otherwise you will get an "unrecognized model suffix" error
|
||||||
|
when the model prober tries to identify the type of the HTML login
|
||||||
|
page.
|
||||||
|
|
||||||
|
#### HFModelSource
|
||||||
|
|
||||||
|
HuggingFace has the most complicated `ModelSource` structure:
|
||||||
|
|
||||||
|
| **Argument** | **Type** | **Default** | **Description** |
|
||||||
|
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||||
|
| `repo_id` | str | None | The ID of the desired model. |
|
||||||
|
| `variant` | ModelRepoVariant | ModelRepoVariant('fp16') | The desired variant. |
|
||||||
|
| `subfolder` | Path | None | Look for the model in a subfolder of the repo. |
|
||||||
|
| `access_token` | str | None | An access token needed to gain access to a subscriber's-only model. |
|
||||||
|
|
||||||
|
|
||||||
|
The `repo_id` is the repository ID, such as `stabilityai/sdxl-turbo`.
|
||||||
|
|
||||||
|
The `variant` is one of the various diffusers formats that HuggingFace
|
||||||
|
supports and is used to pick out from the hodgepodge of files that in
|
||||||
|
a typical HuggingFace repository the particular components needed for
|
||||||
|
a complete diffusers model. `ModelRepoVariant` is an enum that can be
|
||||||
|
imported from `invokeai.backend.model_manager` and has the following
|
||||||
|
values:
|
||||||
|
|
||||||
|
| **Name** | **String Value** |
|
||||||
|
|----------------------------|---------------------------|
|
||||||
|
| ModelRepoVariant.DEFAULT | "default" |
|
||||||
|
| ModelRepoVariant.FP16 | "fp16" |
|
||||||
|
| ModelRepoVariant.FP32 | "fp32" |
|
||||||
|
| ModelRepoVariant.ONNX | "onnx" |
|
||||||
|
| ModelRepoVariant.OPENVINO | "openvino" |
|
||||||
|
| ModelRepoVariant.FLAX | "flax" |
|
||||||
|
|
||||||
|
You can also pass the string forms to `variant` directly. Note that
|
||||||
|
InvokeAI may not be able to load and run all variants. At the current
|
||||||
|
time, specifying `ModelRepoVariant.DEFAULT` will retrieve model files
|
||||||
|
that are unqualified, e.g. `pytorch_model.safetensors` rather than
|
||||||
|
`pytorch_model.fp16.safetensors`. These are usually the 32-bit
|
||||||
|
safetensors forms of the model.
|
||||||
|
|
||||||
|
If `subfolder` is specified, then the requested model resides in a
|
||||||
|
subfolder of the main model repository. This is typically used to
|
||||||
|
fetch and install VAEs.
|
||||||
|
|
||||||
|
Some models require you to be registered with HuggingFace and logged
|
||||||
|
in. To download these files, you must provide an
|
||||||
|
`access_token`. Internally, if no access token is provided, then
|
||||||
|
`HfFolder.get_token()` will be called to fill it in with the cached
|
||||||
|
one.
|
||||||
|
|
||||||
|
|
||||||
#### Monitoring the install job process
|
#### Monitoring the install job process
|
||||||
|
|
||||||
@ -563,7 +656,8 @@ The `ModelInstallJob` class has the following structure:
|
|||||||
|
|
||||||
| **Attribute** | **Type** | **Description** |
|
| **Attribute** | **Type** | **Description** |
|
||||||
|----------------|-----------------|------------------|
|
|----------------|-----------------|------------------|
|
||||||
| `status` | `InstallStatus` | An enum of ["waiting", "running", "completed" and "error"] |
|
| `id` | `int` | Integer ID for this job |
|
||||||
|
| `status` | `InstallStatus` | An enum of [`waiting`, `downloading`, `running`, `completed`, `error` and `cancelled`]|
|
||||||
| `config_in` | `dict` | Overriding configuration values provided by the caller |
|
| `config_in` | `dict` | Overriding configuration values provided by the caller |
|
||||||
| `config_out` | `AnyModelConfig`| After successful completion, contains the configuration record written to the database |
|
| `config_out` | `AnyModelConfig`| After successful completion, contains the configuration record written to the database |
|
||||||
| `inplace` | `boolean` | True if the caller asked to install the model in place using its local path |
|
| `inplace` | `boolean` | True if the caller asked to install the model in place using its local path |
|
||||||
@ -578,30 +672,70 @@ broadcast to the InvokeAI event bus. The events will appear on the bus
|
|||||||
as an event of type `EventServiceBase.model_event`, a timestamp and
|
as an event of type `EventServiceBase.model_event`, a timestamp and
|
||||||
the following event names:
|
the following event names:
|
||||||
|
|
||||||
- `model_install_started`
|
##### `model_install_downloading`
|
||||||
|
|
||||||
The payload will contain the keys `timestamp` and `source`. The latter
|
For remote models only, `model_install_downloading` events will be issued at regular
|
||||||
indicates the requested model source for installation.
|
intervals as the download progresses. The event's payload contains the
|
||||||
|
following keys:
|
||||||
|
|
||||||
- `model_install_progress`
|
| **Key** | **Type** | **Description** |
|
||||||
|
|----------------|-----------|------------------|
|
||||||
|
| `source` | str | String representation of the requested source |
|
||||||
|
| `local_path` | str | String representation of the path to the downloading model (usually a temporary directory) |
|
||||||
|
| `bytes` | int | How many bytes downloaded so far |
|
||||||
|
| `total_bytes` | int | Total size of all the files that make up the model |
|
||||||
|
| `parts` | List[Dict]| Information on the progress of the individual files that make up the model |
|
||||||
|
|
||||||
Emitted at regular intervals when downloading a remote model, the
|
|
||||||
payload will contain the keys `timestamp`, `source`, `current_bytes`
|
|
||||||
and `total_bytes`. These events are _not_ emitted when a local model
|
|
||||||
already on the filesystem is imported.
|
|
||||||
|
|
||||||
- `model_install_completed`
|
The parts is a list of dictionaries that give information on each of
|
||||||
|
the component pieces of the download. The dictionary's keys are
|
||||||
|
`source`, `local_path`, `bytes` and `total_bytes`, and correspond to
|
||||||
|
the like-named keys in the main event.
|
||||||
|
|
||||||
Issued once at the end of a successful installation. The payload will
|
Note that downloading events will not be issued for local models, and
|
||||||
contain the keys `timestamp`, `source` and `key`, where `key` is the
|
that downloading events occur *before* the running event.
|
||||||
ID under which the model has been registered.
|
|
||||||
|
|
||||||
- `model_install_error`
|
##### `model_install_running`
|
||||||
|
|
||||||
|
`model_install_running` is issued when all the required downloads have completed (if applicable) and the
|
||||||
|
model probing, copying and registration process has now started.
|
||||||
|
|
||||||
|
The payload will contain the key `source`.
|
||||||
|
|
||||||
|
##### `model_install_completed`
|
||||||
|
|
||||||
|
`model_install_completed` is issued once at the end of a successful
|
||||||
|
installation. The payload will contain the keys `source`,
|
||||||
|
`total_bytes` and `key`, where `key` is the ID under which the model
|
||||||
|
has been registered.
|
||||||
|
|
||||||
|
##### `model_install_error`
|
||||||
|
|
||||||
|
`model_install_error` is emitted if the installation process fails for
|
||||||
|
some reason. The payload will contain the keys `source`, `error_type`
|
||||||
|
and `error`. `error_type` is a short message indicating the nature of
|
||||||
|
the error, and `error` is the long traceback to help debug the
|
||||||
|
problem.
|
||||||
|
|
||||||
|
##### `model_install_cancelled`
|
||||||
|
|
||||||
|
`model_install_cancelled` is issued if the model installation is
|
||||||
|
cancelled, or if one or more of its files' downloads are
|
||||||
|
cancelled. The payload will contain `source`.
|
||||||
|
|
||||||
|
##### Following the model status
|
||||||
|
|
||||||
|
You may poll the `ModelInstallJob` object returned by `import_model()`
|
||||||
|
to ascertain the state of the install. The job status can be read from
|
||||||
|
the job's `status` attribute, an `InstallStatus` enum which has the
|
||||||
|
enumerated values `WAITING`, `DOWNLOADING`, `RUNNING`, `COMPLETED`,
|
||||||
|
`ERROR` and `CANCELLED`.
|
||||||
|
|
||||||
|
For convenience, install jobs also provide the following boolean
|
||||||
|
properties: `waiting`, `downloading`, `running`, `complete`, `errored`
|
||||||
|
and `cancelled`, as well as `in_terminal_state`. The last will return
|
||||||
|
True if the job is in the complete, errored or cancelled states.
|
||||||
|
|
||||||
Emitted if the installation process fails for some reason. The payload
|
|
||||||
will contain the keys `timestamp`, `source`, `error_type` and
|
|
||||||
`error`. `error_type` is a short message indicating the nature of the
|
|
||||||
error, and `error` is the long traceback to help debug the problem.
|
|
||||||
|
|
||||||
#### Model configuration and probing
|
#### Model configuration and probing
|
||||||
|
|
||||||
@ -621,17 +755,9 @@ overriding values for any of the model's configuration
|
|||||||
attributes. Here is an example of setting the
|
attributes. Here is an example of setting the
|
||||||
`SchedulerPredictionType` and `name` for an sd-2 model:
|
`SchedulerPredictionType` and `name` for an sd-2 model:
|
||||||
|
|
||||||
This is typically used to set
|
|
||||||
the model's name and description, but can also be used to overcome
|
|
||||||
cases in which automatic probing is unable to (correctly) determine
|
|
||||||
the model's attribute. The most common situation is the
|
|
||||||
`prediction_type` field for sd-2 (and rare sd-1) models. Here is an
|
|
||||||
example of how it works:
|
|
||||||
|
|
||||||
```
|
```
|
||||||
install_job = installer.import_model(
|
install_job = installer.import_model(
|
||||||
source='stabilityai/stable-diffusion-2-1',
|
source=HFModelSource(repo_id='stabilityai/stable-diffusion-2-1',variant='fp32'),
|
||||||
variant='fp16',
|
|
||||||
config=dict(
|
config=dict(
|
||||||
prediction_type=SchedulerPredictionType('v_prediction')
|
prediction_type=SchedulerPredictionType('v_prediction')
|
||||||
name='stable diffusion 2 base model',
|
name='stable diffusion 2 base model',
|
||||||
@ -643,29 +769,38 @@ install_job = installer.import_model(
|
|||||||
|
|
||||||
This section describes additional methods provided by the installer class.
|
This section describes additional methods provided by the installer class.
|
||||||
|
|
||||||
#### jobs = installer.wait_for_installs()
|
#### jobs = installer.wait_for_installs([timeout])
|
||||||
|
|
||||||
Block until all pending installs are completed or errored and then
|
Block until all pending installs are completed or errored and then
|
||||||
returns a list of completed jobs.
|
returns a list of completed jobs. The optional `timeout` argument will
|
||||||
|
return from the call if jobs aren't completed in the specified
|
||||||
|
time. An argument of 0 (the default) will block indefinitely.
|
||||||
|
|
||||||
#### jobs = installer.list_jobs([source])
|
#### jobs = installer.list_jobs()
|
||||||
|
|
||||||
Return a list of all active and complete `ModelInstallJobs`. An
|
Return a list of all active and complete `ModelInstallJobs`.
|
||||||
optional `source` argument allows you to filter the returned list by a
|
|
||||||
model source string pattern using a partial string match.
|
|
||||||
|
|
||||||
#### jobs = installer.get_job(source)
|
#### jobs = installer.get_job_by_source(source)
|
||||||
|
|
||||||
Return a list of `ModelInstallJob` corresponding to the indicated
|
Return a list of `ModelInstallJob` corresponding to the indicated
|
||||||
model source.
|
model source.
|
||||||
|
|
||||||
|
#### jobs = installer.get_job_by_id(id)
|
||||||
|
|
||||||
|
Return a list of `ModelInstallJob` corresponding to the indicated
|
||||||
|
model id.
|
||||||
|
|
||||||
|
#### jobs = installer.cancel_job(job)
|
||||||
|
|
||||||
|
Cancel the indicated job.
|
||||||
|
|
||||||
#### installer.prune_jobs
|
#### installer.prune_jobs
|
||||||
|
|
||||||
Remove non-pending jobs (completed or errored) from the job list
|
Remove jobs that are in a terminal state (i.e. complete, errored or
|
||||||
returned by `list_jobs()` and `get_job()`.
|
cancelled) from the job list returned by `list_jobs()` and
|
||||||
|
`get_job()`.
|
||||||
|
|
||||||
#### installer.app_config, installer.record_store,
|
#### installer.app_config, installer.record_store, installer.event_bus
|
||||||
installer.event_bus
|
|
||||||
|
|
||||||
Properties that provide access to the installer's `InvokeAIAppConfig`,
|
Properties that provide access to the installer's `InvokeAIAppConfig`,
|
||||||
`ModelRecordServiceBase` and `EventServiceBase` objects.
|
`ModelRecordServiceBase` and `EventServiceBase` objects.
|
||||||
@ -726,120 +861,6 @@ the API starts up. Its effect is to call `sync_to_config()` to
|
|||||||
synchronize the model record store database with what's currently on
|
synchronize the model record store database with what's currently on
|
||||||
disk.
|
disk.
|
||||||
|
|
||||||
# The remainder of this documentation is provisional, pending implementation of the Download and Load services
|
|
||||||
|
|
||||||
## Let's get loaded, the lowdown on ModelLoadService
|
|
||||||
|
|
||||||
The `ModelLoadService` is responsible for loading a named model into
|
|
||||||
memory so that it can be used for inference. Despite the fact that it
|
|
||||||
does a lot under the covers, it is very straightforward to use.
|
|
||||||
|
|
||||||
An application-wide model loader is created at API initialization time
|
|
||||||
and stored in
|
|
||||||
`ApiDependencies.invoker.services.model_loader`. However, you can
|
|
||||||
create alternative instances if you wish.
|
|
||||||
|
|
||||||
### Creating a ModelLoadService object
|
|
||||||
|
|
||||||
The class is defined in
|
|
||||||
`invokeai.app.services.model_loader_service`. It is initialized with
|
|
||||||
an InvokeAIAppConfig object, from which it gets configuration
|
|
||||||
information such as the user's desired GPU and precision, and with a
|
|
||||||
previously-created `ModelRecordServiceBase` object, from which it
|
|
||||||
loads the requested model's configuration information.
|
|
||||||
|
|
||||||
Here is a typical initialization pattern:
|
|
||||||
|
|
||||||
```
|
|
||||||
from invokeai.app.services.config import InvokeAIAppConfig
|
|
||||||
from invokeai.app.services.model_record_service import ModelRecordServiceBase
|
|
||||||
from invokeai.app.services.model_loader_service import ModelLoadService
|
|
||||||
|
|
||||||
config = InvokeAIAppConfig.get_config()
|
|
||||||
store = ModelRecordServiceBase.open(config)
|
|
||||||
loader = ModelLoadService(config, store)
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that we are relying on the contents of the application
|
|
||||||
configuration to choose the implementation of
|
|
||||||
`ModelRecordServiceBase`.
|
|
||||||
|
|
||||||
### get_model(key, [submodel_type], [context]) -> ModelInfo:
|
|
||||||
|
|
||||||
*** TO DO: change to get_model(key, context=None, **kwargs)
|
|
||||||
|
|
||||||
The `get_model()` method, like its similarly-named cousin in
|
|
||||||
`ModelRecordService`, receives the unique key that identifies the
|
|
||||||
model. It loads the model into memory, gets the model ready for use,
|
|
||||||
and returns a `ModelInfo` object.
|
|
||||||
|
|
||||||
The optional second argument, `subtype` is a `SubModelType` string
|
|
||||||
enum, such as "vae". It is mandatory when used with a main model, and
|
|
||||||
is used to select which part of the main model to load.
|
|
||||||
|
|
||||||
The optional third argument, `context` can be provided by
|
|
||||||
an invocation to trigger model load event reporting. See below for
|
|
||||||
details.
|
|
||||||
|
|
||||||
The returned `ModelInfo` object shares some fields in common with
|
|
||||||
`ModelConfigBase`, but is otherwise a completely different beast:
|
|
||||||
|
|
||||||
| **Field Name** | **Type** | **Description** |
|
|
||||||
|----------------|-----------------|------------------|
|
|
||||||
| `key` | str | The model key derived from the ModelRecordService database |
|
|
||||||
| `name` | str | Name of this model |
|
|
||||||
| `base_model` | BaseModelType | Base model for this model |
|
|
||||||
| `type` | ModelType or SubModelType | Either the model type (non-main) or the submodel type (main models)|
|
|
||||||
| `location` | Path or str | Location of the model on the filesystem |
|
|
||||||
| `precision` | torch.dtype | The torch.precision to use for inference |
|
|
||||||
| `context` | ModelCache.ModelLocker | A context class used to lock the model in VRAM while in use |
|
|
||||||
|
|
||||||
The types for `ModelInfo` and `SubModelType` can be imported from
|
|
||||||
`invokeai.app.services.model_loader_service`.
|
|
||||||
|
|
||||||
To use the model, you use the `ModelInfo` as a context manager using
|
|
||||||
the following pattern:
|
|
||||||
|
|
||||||
```
|
|
||||||
model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
|
|
||||||
with model_info as vae:
|
|
||||||
image = vae.decode(latents)[0]
|
|
||||||
```
|
|
||||||
|
|
||||||
The `vae` model will stay locked in the GPU during the period of time
|
|
||||||
it is in the context manager's scope.
|
|
||||||
|
|
||||||
`get_model()` may raise any of the following exceptions:
|
|
||||||
|
|
||||||
- `UnknownModelException` -- key not in database
|
|
||||||
- `ModelNotFoundException` -- key in database but model not found at path
|
|
||||||
- `InvalidModelException` -- the model is guilty of a variety of sins
|
|
||||||
|
|
||||||
** TO DO: ** Resolve discrepancy between ModelInfo.location and
|
|
||||||
ModelConfig.path.
|
|
||||||
|
|
||||||
### Emitting model loading events
|
|
||||||
|
|
||||||
When the `context` argument is passed to `get_model()`, it will
|
|
||||||
retrieve the invocation event bus from the passed `InvocationContext`
|
|
||||||
object to emit events on the invocation bus. The two events are
|
|
||||||
"model_load_started" and "model_load_completed". Both carry the
|
|
||||||
following payload:
|
|
||||||
|
|
||||||
```
|
|
||||||
payload=dict(
|
|
||||||
queue_id=queue_id,
|
|
||||||
queue_item_id=queue_item_id,
|
|
||||||
queue_batch_id=queue_batch_id,
|
|
||||||
graph_execution_state_id=graph_execution_state_id,
|
|
||||||
model_key=model_key,
|
|
||||||
submodel=submodel,
|
|
||||||
hash=model_info.hash,
|
|
||||||
location=str(model_info.location),
|
|
||||||
precision=str(model_info.precision),
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
***
|
***
|
||||||
|
|
||||||
## Get on line: The Download Queue
|
## Get on line: The Download Queue
|
||||||
@ -879,7 +900,6 @@ following fields:
|
|||||||
| `job_started` | float | | Timestamp for when the job started running |
|
| `job_started` | float | | Timestamp for when the job started running |
|
||||||
| `job_ended` | float | | Timestamp for when the job completed or errored out |
|
| `job_ended` | float | | Timestamp for when the job completed or errored out |
|
||||||
| `job_sequence` | int | | A counter that is incremented each time a model is dequeued |
|
| `job_sequence` | int | | A counter that is incremented each time a model is dequeued |
|
||||||
| `preserve_partial_downloads`| bool | False | Resume partial downloads when relaunched. |
|
|
||||||
| `error` | Exception | | A copy of the Exception that caused an error during download |
|
| `error` | Exception | | A copy of the Exception that caused an error during download |
|
||||||
|
|
||||||
When you create a job, you can assign it a `priority`. If multiple
|
When you create a job, you can assign it a `priority`. If multiple
|
||||||
@ -1184,3 +1204,362 @@ other resources that it might have been using.
|
|||||||
This will start/pause/cancel all jobs that have been submitted to the
|
This will start/pause/cancel all jobs that have been submitted to the
|
||||||
queue and have not yet reached a terminal state.
|
queue and have not yet reached a terminal state.
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
## This Meta be Good: Model Metadata Storage
|
||||||
|
|
||||||
|
The modules found under `invokeai.backend.model_manager.metadata`
|
||||||
|
provide a straightforward API for fetching model metadata from online
|
||||||
|
repositories. Currently two repositories are supported: HuggingFace
|
||||||
|
and Civitai. However, the modules are easily extended for additional
|
||||||
|
repos, provided that they have defined APIs for metadata access.
|
||||||
|
|
||||||
|
Metadata comprises any descriptive information that is not essential
|
||||||
|
for getting the model to run. For example "author" is metadata, while
|
||||||
|
"type", "base" and "format" are not. The latter fields are part of the
|
||||||
|
model's config, as defined in `invokeai.backend.model_manager.config`.
|
||||||
|
|
||||||
|
### Example Usage:
|
||||||
|
|
||||||
|
```
|
||||||
|
from invokeai.backend.model_manager.metadata import (
|
||||||
|
AnyModelRepoMetadata,
|
||||||
|
CivitaiMetadataFetch,
|
||||||
|
CivitaiMetadata
|
||||||
|
ModelMetadataStore,
|
||||||
|
)
|
||||||
|
# to access the initialized sql database
|
||||||
|
from invokeai.app.api.dependencies import ApiDependencies
|
||||||
|
|
||||||
|
civitai = CivitaiMetadataFetch()
|
||||||
|
|
||||||
|
# fetch the metadata
|
||||||
|
model_metadata = civitai.from_url("https://civitai.com/models/215796")
|
||||||
|
|
||||||
|
# get some common metadata fields
|
||||||
|
author = model_metadata.author
|
||||||
|
tags = model_metadata.tags
|
||||||
|
|
||||||
|
# get some Civitai-specific fields
|
||||||
|
assert isinstance(model_metadata, CivitaiMetadata)
|
||||||
|
|
||||||
|
trained_words = model_metadata.trained_words
|
||||||
|
base_model = model_metadata.base_model_trained_on
|
||||||
|
thumbnail = model_metadata.thumbnail_url
|
||||||
|
|
||||||
|
# cache the metadata to the database using the key corresponding to
|
||||||
|
# an existing model config record in the `model_config` table
|
||||||
|
sql_cache = ModelMetadataStore(ApiDependencies.invoker.services.db)
|
||||||
|
sql_cache.add_metadata('fb237ace520b6716adc98bcb16e8462c', model_metadata)
|
||||||
|
|
||||||
|
# now we can search the database by tag, author or model name
|
||||||
|
# matches will contain a list of model keys that match the search
|
||||||
|
matches = sql_cache.search_by_tag({"tool", "turbo"})
|
||||||
|
```
|
||||||
|
|
||||||
|
### Structure of the Metadata objects
|
||||||
|
|
||||||
|
There is a short class hierarchy of Metadata objects, all of which
|
||||||
|
descend from the Pydantic `BaseModel`.
|
||||||
|
|
||||||
|
#### `ModelMetadataBase`
|
||||||
|
|
||||||
|
This is the common base class for metadata:
|
||||||
|
|
||||||
|
| **Field Name** | **Type** | **Description** |
|
||||||
|
|----------------|-----------------|------------------|
|
||||||
|
| `name` | str | Repository's name for the model |
|
||||||
|
| `author` | str | Model's author |
|
||||||
|
| `tags` | Set[str] | Model tags |
|
||||||
|
|
||||||
|
|
||||||
|
Note that the model config record also has a `name` field. It is
|
||||||
|
intended that the config record version be locally customizable, while
|
||||||
|
the metadata version is read-only. However, enforcing this is expected
|
||||||
|
to be part of the business logic.
|
||||||
|
|
||||||
|
Descendants of the base add additional fields.
|
||||||
|
|
||||||
|
#### `HuggingFaceMetadata`
|
||||||
|
|
||||||
|
This descends from `ModelMetadataBase` and adds the following fields:
|
||||||
|
|
||||||
|
| **Field Name** | **Type** | **Description** |
|
||||||
|
|----------------|-----------------|------------------|
|
||||||
|
| `type` | Literal["huggingface"] | Used for the discriminated union of metadata classes|
|
||||||
|
| `id` | str | HuggingFace repo_id |
|
||||||
|
| `tag_dict` | Dict[str, Any] | A dictionary of tag/value pairs provided in addition to `tags` |
|
||||||
|
| `last_modified`| datetime | Date of last commit of this model to the repo |
|
||||||
|
| `files` | List[Path] | List of the files in the model repo |
|
||||||
|
|
||||||
|
|
||||||
|
#### `CivitaiMetadata`
|
||||||
|
|
||||||
|
This descends from `ModelMetadataBase` and adds the following fields:
|
||||||
|
|
||||||
|
| **Field Name** | **Type** | **Description** |
|
||||||
|
|----------------|-----------------|------------------|
|
||||||
|
| `type` | Literal["civitai"] | Used for the discriminated union of metadata classes|
|
||||||
|
| `id` | int | Civitai model id |
|
||||||
|
| `version_name` | str | Name of this version of the model (distinct from model name) |
|
||||||
|
| `version_id` | int | Civitai model version id (distinct from model id) |
|
||||||
|
| `created` | datetime | Date this version of the model was created |
|
||||||
|
| `updated` | datetime | Date this version of the model was last updated |
|
||||||
|
| `published` | datetime | Date this version of the model was published to Civitai |
|
||||||
|
| `description` | str | Model description. Quite verbose and contains HTML tags |
|
||||||
|
| `version_description` | str | Model version description, usually describes changes to the model |
|
||||||
|
| `nsfw` | bool | Whether the model tends to generate NSFW content |
|
||||||
|
| `restrictions` | LicenseRestrictions | An object that describes what is and isn't allowed with this model |
|
||||||
|
| `trained_words`| Set[str] | Trigger words for this model, if any |
|
||||||
|
| `download_url` | AnyHttpUrl | URL for downloading this version of the model |
|
||||||
|
| `base_model_trained_on` | str | Name of the model that this version was trained on |
|
||||||
|
| `thumbnail_url` | AnyHttpUrl | URL to access a representative thumbnail image of the model's output |
|
||||||
|
| `weight_min` | int | For LoRA sliders, the minimum suggested weight to apply |
|
||||||
|
| `weight_max` | int | For LoRA sliders, the maximum suggested weight to apply |
|
||||||
|
|
||||||
|
Note that `weight_min` and `weight_max` are not currently populated
|
||||||
|
and take the default values of (-1.0, +2.0). The issue is that these
|
||||||
|
values aren't part of the structured data but appear in the text
|
||||||
|
description. Some regular expression or LLM coding may be able to
|
||||||
|
extract these values.
|
||||||
|
|
||||||
|
Also be aware that `base_model_trained_on` is free text and doesn't
|
||||||
|
correspond to our `ModelType` enum.
|
||||||
|
|
||||||
|
`CivitaiMetadata` also defines some convenience properties relating to
|
||||||
|
licensing restrictions: `credit_required`, `allow_commercial_use`,
|
||||||
|
`allow_derivatives` and `allow_different_license`.
|
||||||
|
|
||||||
|
#### `AnyModelRepoMetadata`
|
||||||
|
|
||||||
|
This is a discriminated Union of `CivitaiMetadata` and
|
||||||
|
`HuggingFaceMetadata`.
|
||||||
|
|
||||||
|
### Fetching Metadata from Online Repos
|
||||||
|
|
||||||
|
The `HuggingFaceMetadataFetch` and `CivitaiMetadataFetch` classes will
|
||||||
|
retrieve metadata from their corresponding repositories and return
|
||||||
|
`AnyModelRepoMetadata` objects. Their base class
|
||||||
|
`ModelMetadataFetchBase` is an abstract class that defines two
|
||||||
|
methods: `from_url()` and `from_id()`. The former accepts the type of
|
||||||
|
model URLs that the user will try to cut and paste into the model
|
||||||
|
import form. The latter accepts a string ID in the format recognized
|
||||||
|
by the repository of choice. Both methods return an
|
||||||
|
`AnyModelRepoMetadata`.
|
||||||
|
|
||||||
|
The base class also has a class method `from_json()` which will take
|
||||||
|
the JSON representation of a `ModelMetadata` object, validate it, and
|
||||||
|
return the corresponding `AnyModelRepoMetadata` object.
|
||||||
|
|
||||||
|
When initializing one of the metadata fetching classes, you may
|
||||||
|
provide a `requests.Session` argument. This allows you to customize
|
||||||
|
the low-level HTTP fetch requests and is used, for instance, in the
|
||||||
|
testing suite to avoid hitting the internet.
|
||||||
|
|
||||||
|
The HuggingFace and Civitai fetcher subclasses add additional
|
||||||
|
repo-specific fetching methods:
|
||||||
|
|
||||||
|
|
||||||
|
#### HuggingFaceMetadataFetch
|
||||||
|
|
||||||
|
This overrides its base class `from_json()` method to return a
|
||||||
|
`HuggingFaceMetadata` object directly.
|
||||||
|
|
||||||
|
#### CivitaiMetadataFetch
|
||||||
|
|
||||||
|
This adds the following methods:
|
||||||
|
|
||||||
|
`from_civitai_modelid()` This takes the ID of a model, finds the
|
||||||
|
default version of the model, and then retrieves the metadata for
|
||||||
|
that version, returning a `CivitaiMetadata` object directly.
|
||||||
|
|
||||||
|
`from_civitai_versionid()` This takes the ID of a model version and
|
||||||
|
retrieves its metadata. Functionally equivalent to `from_id()`, the
|
||||||
|
only difference is that it returns a `CivitaiMetadata` object rather
|
||||||
|
than an `AnyModelRepoMetadata`.
|
||||||
|
|
||||||
|
|
||||||
|
### Metadata Storage
|
||||||
|
|
||||||
|
The `ModelMetadataStore` provides a simple facility to store model
|
||||||
|
metadata in the `invokeai.db` database. The data is stored as a JSON
|
||||||
|
blob, with a few common fields (`name`, `author`, `tags`) broken out
|
||||||
|
to be searchable.
|
||||||
|
|
||||||
|
When a metadata object is saved to the database, it is identified
|
||||||
|
using the model key, _and this key must correspond to an existing
|
||||||
|
model key in the model_config table_. There is a foreign key integrity
|
||||||
|
constraint between the `model_config.id` field and the
|
||||||
|
`model_metadata.id` field such that if you attempt to save metadata
|
||||||
|
under an unknown key, the attempt will result in an
|
||||||
|
`UnknownModelException`. Likewise, when a model is deleted from
|
||||||
|
`model_config`, the deletion of the corresponding metadata record will
|
||||||
|
be triggered.
|
||||||
|
|
||||||
|
Tags are stored in a normalized fashion in the tables `model_tags` and
|
||||||
|
`tags`. Triggers keep the tag table in sync with the `model_metadata`
|
||||||
|
table.
|
||||||
|
|
||||||
|
To create the storage object, initialize it with the InvokeAI
|
||||||
|
`SqliteDatabase` object. This is often done this way:
|
||||||
|
|
||||||
|
```
|
||||||
|
from invokeai.app.api.dependencies import ApiDependencies
|
||||||
|
metadata_store = ModelMetadataStore(ApiDependencies.invoker.services.db)
|
||||||
|
```
|
||||||
|
|
||||||
|
You can then access the storage with the following methods:
|
||||||
|
|
||||||
|
#### `add_metadata(key, metadata)`
|
||||||
|
|
||||||
|
Add the metadata using a previously-defined model key.
|
||||||
|
|
||||||
|
There is currently no `delete_metadata()` method. The metadata will
|
||||||
|
persist until the matching config is deleted from the `model_config`
|
||||||
|
table.
|
||||||
|
|
||||||
|
#### `get_metadata(key) -> AnyModelRepoMetadata`
|
||||||
|
|
||||||
|
Retrieve the metadata corresponding to the model key.
|
||||||
|
|
||||||
|
#### `update_metadata(key, new_metadata)`
|
||||||
|
|
||||||
|
Update an existing metadata record with new metadata.
|
||||||
|
|
||||||
|
#### `search_by_tag(tags: Set[str]) -> Set[str]`
|
||||||
|
|
||||||
|
Given a set of tags, find models that are tagged with them. If
|
||||||
|
multiple tags are provided then a matching model must be tagged with
|
||||||
|
*all* the tags in the set. This method returns a set of model keys and
|
||||||
|
is intended to be used in conjunction with the `ModelRecordService`:
|
||||||
|
|
||||||
|
```
|
||||||
|
model_config_store = ApiDependencies.invoker.services.model_records
|
||||||
|
matches = metadata_store.search_by_tag({'license:other'})
|
||||||
|
models = [model_config_store.get(x) for x in matches]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `search_by_name(name: str) -> Set[str]`
|
||||||
|
|
||||||
|
Find all model metadata records that have the given name and return a
|
||||||
|
set of keys to the corresponding model config objects.
|
||||||
|
|
||||||
|
#### `search_by_author(author: str) -> Set[str]`
|
||||||
|
|
||||||
|
Find all model metadata records that have the given author and return
|
||||||
|
a set of keys to the corresponding model config objects.
|
||||||
|
|
||||||
|
# The remainder of this documentation is provisional, pending implementation of the Load service
|
||||||
|
|
||||||
|
## Let's get loaded, the lowdown on ModelLoadService
|
||||||
|
|
||||||
|
The `ModelLoadService` is responsible for loading a named model into
|
||||||
|
memory so that it can be used for inference. Despite the fact that it
|
||||||
|
does a lot under the covers, it is very straightforward to use.
|
||||||
|
|
||||||
|
An application-wide model loader is created at API initialization time
|
||||||
|
and stored in
|
||||||
|
`ApiDependencies.invoker.services.model_loader`. However, you can
|
||||||
|
create alternative instances if you wish.
|
||||||
|
|
||||||
|
### Creating a ModelLoadService object
|
||||||
|
|
||||||
|
The class is defined in
|
||||||
|
`invokeai.app.services.model_loader_service`. It is initialized with
|
||||||
|
an InvokeAIAppConfig object, from which it gets configuration
|
||||||
|
information such as the user's desired GPU and precision, and with a
|
||||||
|
previously-created `ModelRecordServiceBase` object, from which it
|
||||||
|
loads the requested model's configuration information.
|
||||||
|
|
||||||
|
Here is a typical initialization pattern:
|
||||||
|
|
||||||
|
```
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.app.services.model_record_service import ModelRecordServiceBase
|
||||||
|
from invokeai.app.services.model_loader_service import ModelLoadService
|
||||||
|
|
||||||
|
config = InvokeAIAppConfig.get_config()
|
||||||
|
store = ModelRecordServiceBase.open(config)
|
||||||
|
loader = ModelLoadService(config, store)
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that we are relying on the contents of the application
|
||||||
|
configuration to choose the implementation of
|
||||||
|
`ModelRecordServiceBase`.
|
||||||
|
|
||||||
|
### get_model(key, [submodel_type], [context]) -> ModelInfo:
|
||||||
|
|
||||||
|
*** TO DO: change to get_model(key, context=None, **kwargs)
|
||||||
|
|
||||||
|
The `get_model()` method, like its similarly-named cousin in
|
||||||
|
`ModelRecordService`, receives the unique key that identifies the
|
||||||
|
model. It loads the model into memory, gets the model ready for use,
|
||||||
|
and returns a `ModelInfo` object.
|
||||||
|
|
||||||
|
The optional second argument, `subtype` is a `SubModelType` string
|
||||||
|
enum, such as "vae". It is mandatory when used with a main model, and
|
||||||
|
is used to select which part of the main model to load.
|
||||||
|
|
||||||
|
The optional third argument, `context` can be provided by
|
||||||
|
an invocation to trigger model load event reporting. See below for
|
||||||
|
details.
|
||||||
|
|
||||||
|
The returned `ModelInfo` object shares some fields in common with
|
||||||
|
`ModelConfigBase`, but is otherwise a completely different beast:
|
||||||
|
|
||||||
|
| **Field Name** | **Type** | **Description** |
|
||||||
|
|----------------|-----------------|------------------|
|
||||||
|
| `key` | str | The model key derived from the ModelRecordService database |
|
||||||
|
| `name` | str | Name of this model |
|
||||||
|
| `base_model` | BaseModelType | Base model for this model |
|
||||||
|
| `type` | ModelType or SubModelType | Either the model type (non-main) or the submodel type (main models)|
|
||||||
|
| `location` | Path or str | Location of the model on the filesystem |
|
||||||
|
| `precision` | torch.dtype | The torch.precision to use for inference |
|
||||||
|
| `context` | ModelCache.ModelLocker | A context class used to lock the model in VRAM while in use |
|
||||||
|
|
||||||
|
The types for `ModelInfo` and `SubModelType` can be imported from
|
||||||
|
`invokeai.app.services.model_loader_service`.
|
||||||
|
|
||||||
|
To use the model, you use the `ModelInfo` as a context manager using
|
||||||
|
the following pattern:
|
||||||
|
|
||||||
|
```
|
||||||
|
model_info = loader.get_model('f13dd932c0c35c22dcb8d6cda4203764', SubModelType('vae'))
|
||||||
|
with model_info as vae:
|
||||||
|
image = vae.decode(latents)[0]
|
||||||
|
```
|
||||||
|
|
||||||
|
The `vae` model will stay locked in the GPU during the period of time
|
||||||
|
it is in the context manager's scope.
|
||||||
|
|
||||||
|
`get_model()` may raise any of the following exceptions:
|
||||||
|
|
||||||
|
- `UnknownModelException` -- key not in database
|
||||||
|
- `ModelNotFoundException` -- key in database but model not found at path
|
||||||
|
- `InvalidModelException` -- the model is guilty of a variety of sins
|
||||||
|
|
||||||
|
** TO DO: ** Resolve discrepancy between ModelInfo.location and
|
||||||
|
ModelConfig.path.
|
||||||
|
|
||||||
|
### Emitting model loading events
|
||||||
|
|
||||||
|
When the `context` argument is passed to `get_model()`, it will
|
||||||
|
retrieve the invocation event bus from the passed `InvocationContext`
|
||||||
|
object to emit events on the invocation bus. The two events are
|
||||||
|
"model_load_started" and "model_load_completed". Both carry the
|
||||||
|
following payload:
|
||||||
|
|
||||||
|
```
|
||||||
|
payload=dict(
|
||||||
|
queue_id=queue_id,
|
||||||
|
queue_item_id=queue_item_id,
|
||||||
|
queue_batch_id=queue_batch_id,
|
||||||
|
graph_execution_state_id=graph_execution_state_id,
|
||||||
|
model_key=model_key,
|
||||||
|
submodel=submodel,
|
||||||
|
hash=model_info.hash,
|
||||||
|
location=str(model_info.location),
|
||||||
|
precision=str(model_info.precision),
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
53
docs/deprecated/2to3.md
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
## :octicons-log-16: Important Changes Since Version 2.3
|
||||||
|
|
||||||
|
### Nodes
|
||||||
|
|
||||||
|
Behind the scenes, InvokeAI has been completely rewritten to support
|
||||||
|
"nodes," small unitary operations that can be combined into graphs to
|
||||||
|
form arbitrary workflows. For example, there is a prompt node that
|
||||||
|
processes the prompt string and feeds it to a text2latent node that
|
||||||
|
generates a latent image. The latents are then fed to a latent2image
|
||||||
|
node that translates the latent image into a PNG.
|
||||||
|
|
||||||
|
The WebGUI has a node editor that allows you to graphically design and
|
||||||
|
execute custom node graphs. The ability to save and load graphs is
|
||||||
|
still a work in progress, but coming soon.
|
||||||
|
|
||||||
|
### Command-Line Interface Retired
|
||||||
|
|
||||||
|
All "invokeai" command-line interfaces have been retired as of version
|
||||||
|
3.4.
|
||||||
|
|
||||||
|
To launch the Web GUI from the command-line, use the command
|
||||||
|
`invokeai-web` rather than the traditional `invokeai --web`.
|
||||||
|
|
||||||
|
### ControlNet
|
||||||
|
|
||||||
|
This version of InvokeAI features ControlNet, a system that allows you
|
||||||
|
to achieve exact poses for human and animal figures by providing a
|
||||||
|
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md)
|
||||||
|
|
||||||
|
### New Schedulers
|
||||||
|
|
||||||
|
The list of schedulers has been completely revamped and brought up to date:
|
||||||
|
|
||||||
|
| **Short Name** | **Scheduler** | **Notes** |
|
||||||
|
|----------------|---------------------------------|-----------------------------|
|
||||||
|
| **ddim** | DDIMScheduler | |
|
||||||
|
| **ddpm** | DDPMScheduler | |
|
||||||
|
| **deis** | DEISMultistepScheduler | |
|
||||||
|
| **lms** | LMSDiscreteScheduler | |
|
||||||
|
| **pndm** | PNDMScheduler | |
|
||||||
|
| **heun** | HeunDiscreteScheduler | original noise schedule |
|
||||||
|
| **heun_k** | HeunDiscreteScheduler | using karras noise schedule |
|
||||||
|
| **euler** | EulerDiscreteScheduler | original noise schedule |
|
||||||
|
| **euler_k** | EulerDiscreteScheduler | using karras noise schedule |
|
||||||
|
| **kdpm_2** | KDPM2DiscreteScheduler | |
|
||||||
|
| **kdpm_2_a** | KDPM2AncestralDiscreteScheduler | |
|
||||||
|
| **dpmpp_2s** | DPMSolverSinglestepScheduler | |
|
||||||
|
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
|
||||||
|
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
|
||||||
|
| **unipc** | UniPCMultistepScheduler | CPU only |
|
||||||
|
| **lcm** | LCMScheduler | |
|
||||||
|
|
||||||
|
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
|
@ -229,29 +229,28 @@ clarity on the intent and common use cases we expect for utilizing them.
|
|||||||
currently being rendered by your browser into a merged copy of the image. This
|
currently being rendered by your browser into a merged copy of the image. This
|
||||||
lowers the resource requirements and should improve performance.
|
lowers the resource requirements and should improve performance.
|
||||||
|
|
||||||
### Seam Correction
|
### Compositing / Seam Correction
|
||||||
|
|
||||||
When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
|
When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
|
||||||
by Stable Diffusion into your existing image. To do this, the area around the
|
by Stable Diffusion into your existing image. This is achieved through compositing - the area around the boundary between your image and the new generation is
|
||||||
`seam` at the boundary between your image and the new generation is
|
|
||||||
automatically blended to produce a seamless output. In a fully automatic
|
automatically blended to produce a seamless output. In a fully automatic
|
||||||
process, a mask is generated to cover the seam, and then the area of the seam is
|
process, a mask is generated to cover the boundary, and then the area of the boundary is
|
||||||
Inpainted.
|
Inpainted.
|
||||||
|
|
||||||
Although the default options should work well most of the time, sometimes it can
|
Although the default options should work well most of the time, sometimes it can
|
||||||
help to alter the parameters that control the seam Inpainting. A wider seam and
|
help to alter the parameters that control the Compositing. A larger blur and
|
||||||
a blur setting of about 1/3 of the seam have been noted as producing
|
a blur setting have been noted as producing
|
||||||
consistently strong results (e.g. 96 wide and 16 blur - adds up to 32 blur with
|
consistently strong results. Strength of 0.7 is best for reducing hard seams.
|
||||||
both sides). Seam strength of 0.7 is best for reducing hard seams.
|
|
||||||
|
- **Mode** - What part of the image will have the Compositing applied to it.
|
||||||
|
- **Mask edge** will apply Compositing to the edge of the masked area
|
||||||
|
- **Mask** will apply Compositing to the entire masked area
|
||||||
|
- **Unmasked** will apply Compositing to the entire image
|
||||||
|
- **Steps** - Number of generation steps that will occur during the Coherence Pass, similar to Denoising Steps. Higher step counts will generally have better results.
|
||||||
|
- **Strength** - How much noise is added for the Coherence Pass, similar to Denoising Strength. A strength of 0 will result in an unchanged image, while a strength of 1 will result in an image with a completely new area as defined by the Mode setting.
|
||||||
|
- **Blur** - Adjusts the pixel radius of the mask. A larger blur radius will cause the mask to extend past the visibly masked area, while too small of a blur radius will result in a mask that is smaller than the visibly masked area.
|
||||||
|
- **Blur Method** - The method of blur applied to the masked area.
|
||||||
|
|
||||||
- **Seam Size** - The size of the seam masked area. Set higher to make a larger
|
|
||||||
mask around the seam.
|
|
||||||
- **Seam Blur** - The size of the blur that is applied on _each_ side of the
|
|
||||||
masked area.
|
|
||||||
- **Seam Strength** - The Image To Image Strength parameter used for the
|
|
||||||
Inpainting generation that is applied to the seam area.
|
|
||||||
- **Seam Steps** - The number of generation steps that should be used to Inpaint
|
|
||||||
the seam.
|
|
||||||
|
|
||||||
### Infill & Scaling
|
### Infill & Scaling
|
||||||
|
|
||||||
|
@ -18,7 +18,7 @@ title: Home
|
|||||||
width: 100%;
|
width: 100%;
|
||||||
max-width: 100%;
|
max-width: 100%;
|
||||||
height: 50px;
|
height: 50px;
|
||||||
background-color: #448AFF;
|
background-color: #35A4DB;
|
||||||
color: #fff;
|
color: #fff;
|
||||||
font-size: 16px;
|
font-size: 16px;
|
||||||
border: none;
|
border: none;
|
||||||
@ -43,7 +43,7 @@ title: Home
|
|||||||
<div align="center" markdown>
|
<div align="center" markdown>
|
||||||
|
|
||||||
|
|
||||||
[](https://github.com/invoke-ai/InvokeAI)
|
[](https://github.com/invoke-ai/InvokeAI)
|
||||||
|
|
||||||
[![discord badge]][discord link]
|
[![discord badge]][discord link]
|
||||||
|
|
||||||
@ -145,60 +145,6 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
|
|||||||
- [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md)
|
- [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md)
|
||||||
- [Database Maintenance and other Command Line Utilities](features/UTILITIES.md)
|
- [Database Maintenance and other Command Line Utilities](features/UTILITIES.md)
|
||||||
|
|
||||||
## :octicons-log-16: Important Changes Since Version 2.3
|
|
||||||
|
|
||||||
### Nodes
|
|
||||||
|
|
||||||
Behind the scenes, InvokeAI has been completely rewritten to support
|
|
||||||
"nodes," small unitary operations that can be combined into graphs to
|
|
||||||
form arbitrary workflows. For example, there is a prompt node that
|
|
||||||
processes the prompt string and feeds it to a text2latent node that
|
|
||||||
generates a latent image. The latents are then fed to a latent2image
|
|
||||||
node that translates the latent image into a PNG.
|
|
||||||
|
|
||||||
The WebGUI has a node editor that allows you to graphically design and
|
|
||||||
execute custom node graphs. The ability to save and load graphs is
|
|
||||||
still a work in progress, but coming soon.
|
|
||||||
|
|
||||||
### Command-Line Interface Retired
|
|
||||||
|
|
||||||
All "invokeai" command-line interfaces have been retired as of version
|
|
||||||
3.4.
|
|
||||||
|
|
||||||
To launch the Web GUI from the command-line, use the command
|
|
||||||
`invokeai-web` rather than the traditional `invokeai --web`.
|
|
||||||
|
|
||||||
### ControlNet
|
|
||||||
|
|
||||||
This version of InvokeAI features ControlNet, a system that allows you
|
|
||||||
to achieve exact poses for human and animal figures by providing a
|
|
||||||
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md)
|
|
||||||
|
|
||||||
### New Schedulers
|
|
||||||
|
|
||||||
The list of schedulers has been completely revamped and brought up to date:
|
|
||||||
|
|
||||||
| **Short Name** | **Scheduler** | **Notes** |
|
|
||||||
|----------------|---------------------------------|-----------------------------|
|
|
||||||
| **ddim** | DDIMScheduler | |
|
|
||||||
| **ddpm** | DDPMScheduler | |
|
|
||||||
| **deis** | DEISMultistepScheduler | |
|
|
||||||
| **lms** | LMSDiscreteScheduler | |
|
|
||||||
| **pndm** | PNDMScheduler | |
|
|
||||||
| **heun** | HeunDiscreteScheduler | original noise schedule |
|
|
||||||
| **heun_k** | HeunDiscreteScheduler | using karras noise schedule |
|
|
||||||
| **euler** | EulerDiscreteScheduler | original noise schedule |
|
|
||||||
| **euler_k** | EulerDiscreteScheduler | using karras noise schedule |
|
|
||||||
| **kdpm_2** | KDPM2DiscreteScheduler | |
|
|
||||||
| **kdpm_2_a** | KDPM2AncestralDiscreteScheduler | |
|
|
||||||
| **dpmpp_2s** | DPMSolverSinglestepScheduler | |
|
|
||||||
| **dpmpp_2m** | DPMSolverMultistepScheduler | original noise schedule |
|
|
||||||
| **dpmpp_2m_k** | DPMSolverMultistepScheduler | using karras noise schedule |
|
|
||||||
| **unipc** | UniPCMultistepScheduler | CPU only |
|
|
||||||
| **lcm** | LCMScheduler | |
|
|
||||||
|
|
||||||
Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
|
|
||||||
|
|
||||||
## :material-target: Troubleshooting
|
## :material-target: Troubleshooting
|
||||||
|
|
||||||
Please check out our **[:material-frequently-asked-questions:
|
Please check out our **[:material-frequently-asked-questions:
|
||||||
|
@ -6,10 +6,17 @@ If you're not familiar with Diffusion, take a look at our [Diffusion Overview.](
|
|||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
|
### Workflow Library
|
||||||
|
The Workflow Library enables you to save workflows to the Invoke database, allowing you to easily create, modify and share workflows as needed.
|
||||||
|
|
||||||
|
A curated set of workflows are provided by default - these are designed to help explain important nodes' usage in the Workflow Editor.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
### Linear View
|
### Linear View
|
||||||
The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations.
|
The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations.
|
||||||
|
|
||||||
To add an input to the Linear UI, right click on the input label and select "Add to Linear View".
|
To add an input to the Linear UI, right click on the **input label** and select "Add to Linear View".
|
||||||
|
|
||||||
The Linear UI View will also be part of the saved workflow, allowing you to share workflows and enable others to use them, regardless of complexity.
|
The Linear UI View will also be part of the saved workflow, allowing you to share workflows and enable others to use them, regardless of complexity.
|
||||||
|
|
||||||
@ -30,7 +37,7 @@ Any node or input field can be renamed in the workflow editor. If the input fiel
|
|||||||
Nodes have a "Use Cache" option in their footer. This allows for performance improvements by using the previously cached values during the workflow processing.
|
Nodes have a "Use Cache" option in their footer. This allows for performance improvements by using the previously cached values during the workflow processing.
|
||||||
|
|
||||||
|
|
||||||
## Important Concepts
|
## Important Nodes & Concepts
|
||||||
|
|
||||||
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
|
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).
|
||||||
|
|
||||||
@ -56,7 +63,7 @@ The ImageToLatents node takes in a pixel image and a VAE and outputs a latents.
|
|||||||
|
|
||||||
It is common to want to use both the same seed (for continuity) and random seeds (for variety). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.
|
It is common to want to use both the same seed (for continuity) and random seeds (for variety). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
### ControlNet
|
### ControlNet
|
||||||
|
|
||||||
|
@ -36,7 +36,8 @@ To use a community workflow, download the the `.json` node graph file and load i
|
|||||||
+ [Mask Operations](#mask-operations)
|
+ [Mask Operations](#mask-operations)
|
||||||
+ [Match Histogram](#match-histogram)
|
+ [Match Histogram](#match-histogram)
|
||||||
+ [Metadata-Linked](#metadata-linked-nodes)
|
+ [Metadata-Linked](#metadata-linked-nodes)
|
||||||
+ [Negative Image](#negative-image)
|
+ [Negative Image](#negative-image)
|
||||||
|
+ [Nightmare Promptgen](#nightmare-promptgen)
|
||||||
+ [Oobabooga](#oobabooga)
|
+ [Oobabooga](#oobabooga)
|
||||||
+ [Prompt Tools](#prompt-tools)
|
+ [Prompt Tools](#prompt-tools)
|
||||||
+ [Remote Image](#remote-image)
|
+ [Remote Image](#remote-image)
|
||||||
@ -346,6 +347,13 @@ Node Link: https://github.com/VeyDlin/negative-image-node
|
|||||||
View:
|
View:
|
||||||
</br><img src="https://raw.githubusercontent.com/VeyDlin/negative-image-node/master/.readme/node.png" width="500" />
|
</br><img src="https://raw.githubusercontent.com/VeyDlin/negative-image-node/master/.readme/node.png" width="500" />
|
||||||
|
|
||||||
|
--------------------------------
|
||||||
|
### Nightmare Promptgen
|
||||||
|
|
||||||
|
**Description:** Nightmare Prompt Generator - Uses a local text generation model to create unique imaginative (but usually nightmarish) prompts for InvokeAI. By default, it allows you to choose from some gpt-neo models I finetuned on over 2500 of my own InvokeAI prompts in Compel format, but you're able to add your own, as well. Offers support for replacing any troublesome words with a random choice from list you can also define.
|
||||||
|
|
||||||
|
**Node Link:** [https://github.com/gogurtenjoyer/nightmare-promptgen](https://github.com/gogurtenjoyer/nightmare-promptgen)
|
||||||
|
|
||||||
--------------------------------
|
--------------------------------
|
||||||
### Oobabooga
|
### Oobabooga
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# Example Workflows
|
# Example Workflows
|
||||||
|
|
||||||
We've curated some example workflows for you to get started with Workflows in InvokeAI
|
We've curated some example workflows for you to get started with Workflows in InvokeAI! These can also be found in the Workflow Library, located in the Workflow Editor of Invoke.
|
||||||
|
|
||||||
To use them, right click on your desired workflow, follow the link to GitHub and click the "⬇" button to download the raw file. You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!
|
To use them, right click on your desired workflow, follow the link to GitHub and click the "⬇" button to download the raw file. You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!
|
||||||
|
|
||||||
|
@ -215,6 +215,7 @@ We thank them for all of their time and hard work.
|
|||||||
- Robert Bolender
|
- Robert Bolender
|
||||||
- Robin Rombach
|
- Robin Rombach
|
||||||
- Rohan Barar
|
- Rohan Barar
|
||||||
|
- rohinish404
|
||||||
- rpagliuca
|
- rpagliuca
|
||||||
- rromb
|
- rromb
|
||||||
- Rupesh Sreeraman
|
- Rupesh Sreeraman
|
||||||
|
5
docs/stylesheets/extra.css
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
:root {
|
||||||
|
--md-primary-fg-color: #35A4DB;
|
||||||
|
--md-primary-fg-color--light: #35A4DB;
|
||||||
|
--md-primary-fg-color--dark: #35A4DB;
|
||||||
|
}
|
@ -241,12 +241,12 @@ class InvokeAiInstance:
|
|||||||
pip[
|
pip[
|
||||||
"install",
|
"install",
|
||||||
"--require-virtualenv",
|
"--require-virtualenv",
|
||||||
"numpy~=1.24.0", # choose versions that won't be uninstalled during phase 2
|
"numpy==1.26.3", # choose versions that won't be uninstalled during phase 2
|
||||||
"urllib3~=1.26.0",
|
"urllib3~=1.26.0",
|
||||||
"requests~=2.28.0",
|
"requests~=2.28.0",
|
||||||
"torch==2.1.2",
|
"torch==2.1.2",
|
||||||
"torchmetrics==0.11.4",
|
"torchmetrics==0.11.4",
|
||||||
"torchvision>=0.16.2",
|
"torchvision==0.16.2",
|
||||||
"--force-reinstall",
|
"--force-reinstall",
|
||||||
"--find-links" if find_links is not None else None,
|
"--find-links" if find_links is not None else None,
|
||||||
find_links,
|
find_links,
|
||||||
|
@ -3,6 +3,7 @@
|
|||||||
from logging import Logger
|
from logging import Logger
|
||||||
|
|
||||||
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
|
from invokeai.app.services.shared.sqlite.sqlite_util import init_db
|
||||||
|
from invokeai.backend.model_manager.metadata import ModelMetadataStore
|
||||||
from invokeai.backend.util.logging import InvokeAILogger
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
from invokeai.version.invokeai_version import __version__
|
from invokeai.version.invokeai_version import __version__
|
||||||
|
|
||||||
@ -61,7 +62,7 @@ class ApiDependencies:
|
|||||||
invoker: Invoker
|
invoker: Invoker
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger):
|
def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger) -> None:
|
||||||
logger.info(f"InvokeAI version {__version__}")
|
logger.info(f"InvokeAI version {__version__}")
|
||||||
logger.info(f"Root directory = {str(config.root_path)}")
|
logger.info(f"Root directory = {str(config.root_path)}")
|
||||||
logger.debug(f"Internet connectivity is {config.internet_available}")
|
logger.debug(f"Internet connectivity is {config.internet_available}")
|
||||||
@ -87,8 +88,13 @@ class ApiDependencies:
|
|||||||
model_manager = ModelManagerService(config, logger)
|
model_manager = ModelManagerService(config, logger)
|
||||||
model_record_service = ModelRecordServiceSQL(db=db)
|
model_record_service = ModelRecordServiceSQL(db=db)
|
||||||
download_queue_service = DownloadQueueService(event_bus=events)
|
download_queue_service = DownloadQueueService(event_bus=events)
|
||||||
|
metadata_store = ModelMetadataStore(db=db)
|
||||||
model_install_service = ModelInstallService(
|
model_install_service = ModelInstallService(
|
||||||
app_config=config, record_store=model_record_service, event_bus=events
|
app_config=config,
|
||||||
|
record_store=model_record_service,
|
||||||
|
download_queue=download_queue_service,
|
||||||
|
metadata_store=metadata_store,
|
||||||
|
event_bus=events,
|
||||||
)
|
)
|
||||||
names = SimpleNameService()
|
names = SimpleNameService()
|
||||||
performance_statistics = InvocationStatsService()
|
performance_statistics = InvocationStatsService()
|
||||||
@ -131,6 +137,6 @@ class ApiDependencies:
|
|||||||
db.clean()
|
db.clean()
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def shutdown():
|
def shutdown() -> None:
|
||||||
if ApiDependencies.invoker:
|
if ApiDependencies.invoker:
|
||||||
ApiDependencies.invoker.stop()
|
ApiDependencies.invoker.stop()
|
||||||
|
28
invokeai/app/api/no_cache_staticfiles.py
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from starlette.responses import Response
|
||||||
|
from starlette.staticfiles import StaticFiles
|
||||||
|
|
||||||
|
|
||||||
|
class NoCacheStaticFiles(StaticFiles):
    """
    This class is used to override the default caching behavior of starlette for static files,
    ensuring we *never* cache static files. It modifies the file response headers to strictly
    never cache the files.

    Static files include the javascript bundles, fonts, locales, and some images. Generated
    images are not included, as they are served by a router.
    """

    def __init__(self, *args: Any, **kwargs: Any):
        # Header values applied to every static file response. Note: the original
        # value contained a stray empty directive ("no-store, , must-revalidate"),
        # which produced a malformed Cache-Control header; fixed here.
        self.cachecontrol = "max-age=0, no-cache, no-store, must-revalidate"
        self.pragma = "no-cache"
        self.expires = "0"
        super().__init__(*args, **kwargs)

    def file_response(self, *args: Any, **kwargs: Any) -> Response:
        # Delegate to starlette's implementation, then force no-cache headers.
        # setdefault is used so any headers explicitly set upstream win.
        resp = super().file_response(*args, **kwargs)
        resp.headers.setdefault("Cache-Control", self.cachecontrol)
        resp.headers.setdefault("Pragma", self.pragma)
        resp.headers.setdefault("Expires", self.expires)
        return resp
|
@ -4,7 +4,7 @@
|
|||||||
|
|
||||||
from hashlib import sha1
|
from hashlib import sha1
|
||||||
from random import randbytes
|
from random import randbytes
|
||||||
from typing import Any, Dict, List, Optional
|
from typing import Any, Dict, List, Optional, Set
|
||||||
|
|
||||||
from fastapi import Body, Path, Query, Response
|
from fastapi import Body, Path, Query, Response
|
||||||
from fastapi.routing import APIRouter
|
from fastapi.routing import APIRouter
|
||||||
@ -16,13 +16,18 @@ from invokeai.app.services.model_install import ModelInstallJob, ModelSource
|
|||||||
from invokeai.app.services.model_records import (
|
from invokeai.app.services.model_records import (
|
||||||
DuplicateModelException,
|
DuplicateModelException,
|
||||||
InvalidModelException,
|
InvalidModelException,
|
||||||
|
ModelRecordOrderBy,
|
||||||
|
ModelSummary,
|
||||||
UnknownModelException,
|
UnknownModelException,
|
||||||
)
|
)
|
||||||
|
from invokeai.app.services.shared.pagination import PaginatedResults
|
||||||
from invokeai.backend.model_manager.config import (
|
from invokeai.backend.model_manager.config import (
|
||||||
AnyModelConfig,
|
AnyModelConfig,
|
||||||
BaseModelType,
|
BaseModelType,
|
||||||
|
ModelFormat,
|
||||||
ModelType,
|
ModelType,
|
||||||
)
|
)
|
||||||
|
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
|
||||||
|
|
||||||
from ..dependencies import ApiDependencies
|
from ..dependencies import ApiDependencies
|
||||||
|
|
||||||
@ -32,11 +37,20 @@ model_records_router = APIRouter(prefix="/v1/model/record", tags=["model_manager
|
|||||||
class ModelsList(BaseModel):
|
class ModelsList(BaseModel):
|
||||||
"""Return list of configs."""
|
"""Return list of configs."""
|
||||||
|
|
||||||
models: list[AnyModelConfig]
|
models: List[AnyModelConfig]
|
||||||
|
|
||||||
model_config = ConfigDict(use_enum_values=True)
|
model_config = ConfigDict(use_enum_values=True)
|
||||||
|
|
||||||
|
|
||||||
|
class ModelTagSet(BaseModel):
|
||||||
|
"""Return tags for a set of models."""
|
||||||
|
|
||||||
|
key: str
|
||||||
|
name: str
|
||||||
|
author: str
|
||||||
|
tags: Set[str]
|
||||||
|
|
||||||
|
|
||||||
@model_records_router.get(
|
@model_records_router.get(
|
||||||
"/",
|
"/",
|
||||||
operation_id="list_model_records",
|
operation_id="list_model_records",
|
||||||
@ -45,7 +59,7 @@ async def list_model_records(
|
|||||||
base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
|
base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
|
||||||
model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
|
model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
|
||||||
model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"),
|
model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"),
|
||||||
model_format: Optional[str] = Query(
|
model_format: Optional[ModelFormat] = Query(
|
||||||
default=None, description="Exact match on the format of the model (e.g. 'diffusers')"
|
default=None, description="Exact match on the format of the model (e.g. 'diffusers')"
|
||||||
),
|
),
|
||||||
) -> ModelsList:
|
) -> ModelsList:
|
||||||
@ -86,6 +100,59 @@ async def get_model_record(
|
|||||||
raise HTTPException(status_code=404, detail=str(e))
|
raise HTTPException(status_code=404, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@model_records_router.get("/meta", operation_id="list_model_summary")
|
||||||
|
async def list_model_summary(
|
||||||
|
page: int = Query(default=0, description="The page to get"),
|
||||||
|
per_page: int = Query(default=10, description="The number of models per page"),
|
||||||
|
order_by: ModelRecordOrderBy = Query(default=ModelRecordOrderBy.Default, description="The attribute to order by"),
|
||||||
|
) -> PaginatedResults[ModelSummary]:
|
||||||
|
"""Gets a page of model summary data."""
|
||||||
|
return ApiDependencies.invoker.services.model_records.list_models(page=page, per_page=per_page, order_by=order_by)
|
||||||
|
|
||||||
|
|
||||||
|
@model_records_router.get(
|
||||||
|
"/meta/i/{key}",
|
||||||
|
operation_id="get_model_metadata",
|
||||||
|
responses={
|
||||||
|
200: {"description": "Success"},
|
||||||
|
400: {"description": "Bad request"},
|
||||||
|
404: {"description": "No metadata available"},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def get_model_metadata(
|
||||||
|
key: str = Path(description="Key of the model repo metadata to fetch."),
|
||||||
|
) -> Optional[AnyModelRepoMetadata]:
|
||||||
|
"""Get a model metadata object."""
|
||||||
|
record_store = ApiDependencies.invoker.services.model_records
|
||||||
|
result = record_store.get_metadata(key)
|
||||||
|
if not result:
|
||||||
|
raise HTTPException(status_code=404, detail="No metadata for a model with this key")
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
@model_records_router.get(
|
||||||
|
"/tags",
|
||||||
|
operation_id="list_tags",
|
||||||
|
)
|
||||||
|
async def list_tags() -> Set[str]:
|
||||||
|
"""Get a unique set of all the model tags."""
|
||||||
|
record_store = ApiDependencies.invoker.services.model_records
|
||||||
|
return record_store.list_tags()
|
||||||
|
|
||||||
|
|
||||||
|
@model_records_router.get(
|
||||||
|
"/tags/search",
|
||||||
|
operation_id="search_by_metadata_tags",
|
||||||
|
)
|
||||||
|
async def search_by_metadata_tags(
|
||||||
|
tags: Set[str] = Query(default=None, description="Tags to search for"),
|
||||||
|
) -> ModelsList:
|
||||||
|
"""Get a list of models."""
|
||||||
|
record_store = ApiDependencies.invoker.services.model_records
|
||||||
|
results = record_store.search_by_metadata_tag(tags)
|
||||||
|
return ModelsList(models=results)
|
||||||
|
|
||||||
|
|
||||||
@model_records_router.patch(
|
@model_records_router.patch(
|
||||||
"/i/{key}",
|
"/i/{key}",
|
||||||
operation_id="update_model_record",
|
operation_id="update_model_record",
|
||||||
@ -159,9 +226,7 @@ async def del_model_record(
|
|||||||
async def add_model_record(
|
async def add_model_record(
|
||||||
config: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")],
|
config: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")],
|
||||||
) -> AnyModelConfig:
|
) -> AnyModelConfig:
|
||||||
"""
|
"""Add a model using the configuration information appropriate for its type."""
|
||||||
Add a model using the configuration information appropriate for its type.
|
|
||||||
"""
|
|
||||||
logger = ApiDependencies.invoker.services.logger
|
logger = ApiDependencies.invoker.services.logger
|
||||||
record_store = ApiDependencies.invoker.services.model_records
|
record_store = ApiDependencies.invoker.services.model_records
|
||||||
if config.key == "<NOKEY>":
|
if config.key == "<NOKEY>":
|
||||||
@ -243,7 +308,7 @@ async def import_model(
|
|||||||
Installation occurs in the background. Either use list_model_install_jobs()
|
Installation occurs in the background. Either use list_model_install_jobs()
|
||||||
to poll for completion, or listen on the event bus for the following events:
|
to poll for completion, or listen on the event bus for the following events:
|
||||||
|
|
||||||
"model_install_started"
|
"model_install_running"
|
||||||
"model_install_completed"
|
"model_install_completed"
|
||||||
"model_install_error"
|
"model_install_error"
|
||||||
|
|
||||||
@ -279,16 +344,46 @@ async def import_model(
|
|||||||
operation_id="list_model_install_jobs",
|
operation_id="list_model_install_jobs",
|
||||||
)
|
)
|
||||||
async def list_model_install_jobs() -> List[ModelInstallJob]:
|
async def list_model_install_jobs() -> List[ModelInstallJob]:
|
||||||
"""
|
"""Return list of model install jobs."""
|
||||||
Return list of model install jobs.
|
|
||||||
|
|
||||||
If the optional 'source' argument is provided, then the list will be filtered
|
|
||||||
for partial string matches against the install source.
|
|
||||||
"""
|
|
||||||
jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_install.list_jobs()
|
jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_install.list_jobs()
|
||||||
return jobs
|
return jobs
|
||||||
|
|
||||||
|
|
||||||
|
@model_records_router.get(
|
||||||
|
"/import/{id}",
|
||||||
|
operation_id="get_model_install_job",
|
||||||
|
responses={
|
||||||
|
200: {"description": "Success"},
|
||||||
|
404: {"description": "No such job"},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def get_model_install_job(id: int = Path(description="Model install id")) -> ModelInstallJob:
|
||||||
|
"""Return model install job corresponding to the given source."""
|
||||||
|
try:
|
||||||
|
return ApiDependencies.invoker.services.model_install.get_job_by_id(id)
|
||||||
|
except ValueError as e:
|
||||||
|
raise HTTPException(status_code=404, detail=str(e))
|
||||||
|
|
||||||
|
|
||||||
|
@model_records_router.delete(
|
||||||
|
"/import/{id}",
|
||||||
|
operation_id="cancel_model_install_job",
|
||||||
|
responses={
|
||||||
|
201: {"description": "The job was cancelled successfully"},
|
||||||
|
415: {"description": "No such job"},
|
||||||
|
},
|
||||||
|
status_code=201,
|
||||||
|
)
|
||||||
|
async def cancel_model_install_job(id: int = Path(description="Model install job ID")) -> None:
|
||||||
|
"""Cancel the model install job(s) corresponding to the given job ID."""
|
||||||
|
installer = ApiDependencies.invoker.services.model_install
|
||||||
|
try:
|
||||||
|
job = installer.get_job_by_id(id)
|
||||||
|
except ValueError as e:
|
||||||
|
raise HTTPException(status_code=415, detail=str(e))
|
||||||
|
installer.cancel_job(job)
|
||||||
|
|
||||||
|
|
||||||
@model_records_router.patch(
|
@model_records_router.patch(
|
||||||
"/import",
|
"/import",
|
||||||
operation_id="prune_model_install_jobs",
|
operation_id="prune_model_install_jobs",
|
||||||
@ -298,9 +393,7 @@ async def list_model_install_jobs() -> List[ModelInstallJob]:
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
async def prune_model_install_jobs() -> Response:
|
async def prune_model_install_jobs() -> Response:
|
||||||
"""
|
"""Prune all completed and errored jobs from the install job list."""
|
||||||
Prune all completed and errored jobs from the install job list.
|
|
||||||
"""
|
|
||||||
ApiDependencies.invoker.services.model_install.prune_jobs()
|
ApiDependencies.invoker.services.model_install.prune_jobs()
|
||||||
return Response(status_code=204)
|
return Response(status_code=204)
|
||||||
|
|
||||||
@ -315,7 +408,9 @@ async def prune_model_install_jobs() -> Response:
|
|||||||
)
|
)
|
||||||
async def sync_models_to_config() -> Response:
|
async def sync_models_to_config() -> Response:
|
||||||
"""
|
"""
|
||||||
Traverse the models and autoimport directories. Model files without a corresponding
|
Traverse the models and autoimport directories.
|
||||||
|
|
||||||
|
Model files without a corresponding
|
||||||
record in the database are added. Orphan records without a models file are deleted.
|
record in the database are added. Orphan records without a models file are deleted.
|
||||||
"""
|
"""
|
||||||
ApiDependencies.invoker.services.model_install.sync_to_config()
|
ApiDependencies.invoker.services.model_install.sync_to_config()
|
||||||
|
@ -23,10 +23,11 @@ class DynamicPromptsResponse(BaseModel):
|
|||||||
)
|
)
|
||||||
async def parse_dynamicprompts(
|
async def parse_dynamicprompts(
|
||||||
prompt: str = Body(description="The prompt to parse with dynamicprompts"),
|
prompt: str = Body(description="The prompt to parse with dynamicprompts"),
|
||||||
max_prompts: int = Body(default=1000, description="The max number of prompts to generate"),
|
max_prompts: int = Body(ge=1, le=10000, default=1000, description="The max number of prompts to generate"),
|
||||||
combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"),
|
combinatorial: bool = Body(default=True, description="Whether to use the combinatorial generator"),
|
||||||
) -> DynamicPromptsResponse:
|
) -> DynamicPromptsResponse:
|
||||||
"""Creates a batch process"""
|
"""Creates a batch process"""
|
||||||
|
max_prompts = min(max_prompts, 10000)
|
||||||
generator: Union[RandomPromptGenerator, CombinatorialPromptGenerator]
|
generator: Union[RandomPromptGenerator, CombinatorialPromptGenerator]
|
||||||
try:
|
try:
|
||||||
error: Optional[str] = None
|
error: Optional[str] = None
|
||||||
|
@ -3,6 +3,7 @@
|
|||||||
# values from the command line or config file.
|
# values from the command line or config file.
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
|
||||||
from invokeai.version.invokeai_version import __version__
|
from invokeai.version.invokeai_version import __version__
|
||||||
|
|
||||||
from .services.config import InvokeAIAppConfig
|
from .services.config import InvokeAIAppConfig
|
||||||
@ -27,8 +28,7 @@ if True: # hack to make flake8 happy with imports coming after setting up the c
|
|||||||
from fastapi.middleware.gzip import GZipMiddleware
|
from fastapi.middleware.gzip import GZipMiddleware
|
||||||
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
|
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
|
||||||
from fastapi.openapi.utils import get_openapi
|
from fastapi.openapi.utils import get_openapi
|
||||||
from fastapi.responses import FileResponse, HTMLResponse
|
from fastapi.responses import HTMLResponse
|
||||||
from fastapi.staticfiles import StaticFiles
|
|
||||||
from fastapi_events.handlers.local import local_handler
|
from fastapi_events.handlers.local import local_handler
|
||||||
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
||||||
from pydantic.json_schema import models_json_schema
|
from pydantic.json_schema import models_json_schema
|
||||||
@ -76,7 +76,7 @@ mimetypes.add_type("text/css", ".css")
|
|||||||
|
|
||||||
# Create the app
|
# Create the app
|
||||||
# TODO: create this all in a method so configuration/etc. can be passed in?
|
# TODO: create this all in a method so configuration/etc. can be passed in?
|
||||||
app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None, separate_input_output_schemas=False)
|
app = FastAPI(title="Invoke - Community Edition", docs_url=None, redoc_url=None, separate_input_output_schemas=False)
|
||||||
|
|
||||||
# Add event handler
|
# Add event handler
|
||||||
event_handler_id: int = id(app)
|
event_handler_id: int = id(app)
|
||||||
@ -205,8 +205,8 @@ app.openapi = custom_openapi # type: ignore [method-assign] # this is a valid a
|
|||||||
def overridden_swagger() -> HTMLResponse:
|
def overridden_swagger() -> HTMLResponse:
|
||||||
return get_swagger_ui_html(
|
return get_swagger_ui_html(
|
||||||
openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string
|
openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string
|
||||||
title=app.title,
|
title=f"{app.title} - Swagger UI",
|
||||||
swagger_favicon_url="/static/docs/favicon.ico",
|
swagger_favicon_url="static/docs/invoke-favicon-docs.svg",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -214,26 +214,20 @@ def overridden_swagger() -> HTMLResponse:
|
|||||||
def overridden_redoc() -> HTMLResponse:
|
def overridden_redoc() -> HTMLResponse:
|
||||||
return get_redoc_html(
|
return get_redoc_html(
|
||||||
openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string
|
openapi_url=app.openapi_url, # type: ignore [arg-type] # this is always a string
|
||||||
title=app.title,
|
title=f"{app.title} - Redoc",
|
||||||
redoc_favicon_url="/static/docs/favicon.ico",
|
redoc_favicon_url="static/docs/invoke-favicon-docs.svg",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
web_root_path = Path(list(web_dir.__path__)[0])
|
web_root_path = Path(list(web_dir.__path__)[0])
|
||||||
|
|
||||||
# Only serve the UI if we it has a build
|
try:
|
||||||
if (web_root_path / "dist").exists():
|
app.mount("/", NoCacheStaticFiles(directory=Path(web_root_path, "dist"), html=True), name="ui")
|
||||||
# Cannot add headers to StaticFiles, so we must serve index.html with a custom route
|
except RuntimeError:
|
||||||
# Add cache-control: no-store header to prevent caching of index.html, which leads to broken UIs at release
|
logger.warn(f"No UI found at {web_root_path}/dist, skipping UI mount")
|
||||||
@app.get("/", include_in_schema=False, name="ui_root")
|
app.mount(
|
||||||
def get_index() -> FileResponse:
|
"/static", NoCacheStaticFiles(directory=Path(web_root_path, "static/")), name="static"
|
||||||
return FileResponse(Path(web_root_path, "dist/index.html"), headers={"Cache-Control": "no-store"})
|
) # docs favicon is in here
|
||||||
|
|
||||||
# # Must mount *after* the other routes else it borks em
|
|
||||||
app.mount("/assets", StaticFiles(directory=Path(web_root_path, "dist/assets/")), name="assets")
|
|
||||||
app.mount("/locales", StaticFiles(directory=Path(web_root_path, "dist/locales/")), name="locales")
|
|
||||||
|
|
||||||
app.mount("/static", StaticFiles(directory=Path(web_root_path, "static/")), name="static") # docs favicon is in here
|
|
||||||
|
|
||||||
|
|
||||||
def invoke_api() -> None:
|
def invoke_api() -> None:
|
||||||
|
@ -24,9 +24,10 @@ from controlnet_aux import (
|
|||||||
)
|
)
|
||||||
from controlnet_aux.util import HWC3, ade_palette
|
from controlnet_aux.util import HWC3, ade_palette
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
from pydantic import BaseModel, ConfigDict, Field, field_validator
|
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
|
||||||
|
|
||||||
from invokeai.app.invocations.primitives import ImageField, ImageOutput
|
from invokeai.app.invocations.primitives import ImageField, ImageOutput
|
||||||
|
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||||
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
|
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
|
||||||
from invokeai.app.shared.fields import FieldDescriptions
|
from invokeai.app.shared.fields import FieldDescriptions
|
||||||
|
|
||||||
@ -75,17 +76,16 @@ class ControlField(BaseModel):
|
|||||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||||
|
|
||||||
@field_validator("control_weight")
|
@field_validator("control_weight")
|
||||||
|
@classmethod
|
||||||
def validate_control_weight(cls, v):
|
def validate_control_weight(cls, v):
|
||||||
"""Validate that all control weights in the valid range"""
|
validate_weights(v)
|
||||||
if isinstance(v, list):
|
|
||||||
for i in v:
|
|
||||||
if i < -1 or i > 2:
|
|
||||||
raise ValueError("Control weights must be within -1 to 2 range")
|
|
||||||
else:
|
|
||||||
if v < -1 or v > 2:
|
|
||||||
raise ValueError("Control weights must be within -1 to 2 range")
|
|
||||||
return v
|
return v
|
||||||
|
|
||||||
|
@model_validator(mode="after")
|
||||||
|
def validate_begin_end_step_percent(self):
|
||||||
|
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||||
|
return self
|
||||||
|
|
||||||
|
|
||||||
@invocation_output("control_output")
|
@invocation_output("control_output")
|
||||||
class ControlOutput(BaseInvocationOutput):
|
class ControlOutput(BaseInvocationOutput):
|
||||||
@ -95,17 +95,17 @@ class ControlOutput(BaseInvocationOutput):
|
|||||||
control: ControlField = OutputField(description=FieldDescriptions.control)
|
control: ControlField = OutputField(description=FieldDescriptions.control)
|
||||||
|
|
||||||
|
|
||||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.0")
|
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.1")
|
||||||
class ControlNetInvocation(BaseInvocation):
|
class ControlNetInvocation(BaseInvocation):
|
||||||
"""Collects ControlNet info to pass to other nodes"""
|
"""Collects ControlNet info to pass to other nodes"""
|
||||||
|
|
||||||
image: ImageField = InputField(description="The control image")
|
image: ImageField = InputField(description="The control image")
|
||||||
control_model: ControlNetModelField = InputField(description=FieldDescriptions.controlnet_model, input=Input.Direct)
|
control_model: ControlNetModelField = InputField(description=FieldDescriptions.controlnet_model, input=Input.Direct)
|
||||||
control_weight: Union[float, List[float]] = InputField(
|
control_weight: Union[float, List[float]] = InputField(
|
||||||
default=1.0, description="The weight given to the ControlNet"
|
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
|
||||||
)
|
)
|
||||||
begin_step_percent: float = InputField(
|
begin_step_percent: float = InputField(
|
||||||
default=0, ge=-1, le=2, description="When the ControlNet is first applied (% of total steps)"
|
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
|
||||||
)
|
)
|
||||||
end_step_percent: float = InputField(
|
end_step_percent: float = InputField(
|
||||||
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
|
||||||
@ -113,6 +113,17 @@ class ControlNetInvocation(BaseInvocation):
|
|||||||
control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
|
control_mode: CONTROLNET_MODE_VALUES = InputField(default="balanced", description="The control mode used")
|
||||||
resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")
|
resize_mode: CONTROLNET_RESIZE_VALUES = InputField(default="just_resize", description="The resize mode used")
|
||||||
|
|
||||||
|
@field_validator("control_weight")
|
||||||
|
@classmethod
|
||||||
|
def validate_control_weight(cls, v):
|
||||||
|
validate_weights(v)
|
||||||
|
return v
|
||||||
|
|
||||||
|
@model_validator(mode="after")
|
||||||
|
def validate_begin_end_step_percent(self) -> "ControlNetInvocation":
|
||||||
|
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||||
|
return self
|
||||||
|
|
||||||
def invoke(self, context: InvocationContext) -> ControlOutput:
|
def invoke(self, context: InvocationContext) -> ControlOutput:
|
||||||
return ControlOutput(
|
return ControlOutput(
|
||||||
control=ControlField(
|
control=ControlField(
|
||||||
|
@ -2,7 +2,7 @@ import os
|
|||||||
from builtins import float
|
from builtins import float
|
||||||
from typing import List, Union
|
from typing import List, Union
|
||||||
|
|
||||||
from pydantic import BaseModel, ConfigDict, Field
|
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
|
||||||
|
|
||||||
from invokeai.app.invocations.baseinvocation import (
|
from invokeai.app.invocations.baseinvocation import (
|
||||||
BaseInvocation,
|
BaseInvocation,
|
||||||
@ -15,6 +15,7 @@ from invokeai.app.invocations.baseinvocation import (
|
|||||||
invocation_output,
|
invocation_output,
|
||||||
)
|
)
|
||||||
from invokeai.app.invocations.primitives import ImageField
|
from invokeai.app.invocations.primitives import ImageField
|
||||||
|
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||||
from invokeai.app.shared.fields import FieldDescriptions
|
from invokeai.app.shared.fields import FieldDescriptions
|
||||||
from invokeai.backend.model_management.models.base import BaseModelType, ModelType
|
from invokeai.backend.model_management.models.base import BaseModelType, ModelType
|
||||||
from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id
|
from invokeai.backend.model_management.models.ip_adapter import get_ip_adapter_image_encoder_model_id
|
||||||
@ -39,7 +40,6 @@ class IPAdapterField(BaseModel):
|
|||||||
ip_adapter_model: IPAdapterModelField = Field(description="The IP-Adapter model to use.")
|
ip_adapter_model: IPAdapterModelField = Field(description="The IP-Adapter model to use.")
|
||||||
image_encoder_model: CLIPVisionModelField = Field(description="The name of the CLIP image encoder model.")
|
image_encoder_model: CLIPVisionModelField = Field(description="The name of the CLIP image encoder model.")
|
||||||
weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
|
weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
|
||||||
# weight: float = Field(default=1.0, ge=0, description="The weight of the IP-Adapter.")
|
|
||||||
begin_step_percent: float = Field(
|
begin_step_percent: float = Field(
|
||||||
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
||||||
)
|
)
|
||||||
@ -47,6 +47,17 @@ class IPAdapterField(BaseModel):
|
|||||||
default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
|
default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@field_validator("weight")
|
||||||
|
@classmethod
|
||||||
|
def validate_ip_adapter_weight(cls, v):
|
||||||
|
validate_weights(v)
|
||||||
|
return v
|
||||||
|
|
||||||
|
@model_validator(mode="after")
|
||||||
|
def validate_begin_end_step_percent(self):
|
||||||
|
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||||
|
return self
|
||||||
|
|
||||||
|
|
||||||
@invocation_output("ip_adapter_output")
|
@invocation_output("ip_adapter_output")
|
||||||
class IPAdapterOutput(BaseInvocationOutput):
|
class IPAdapterOutput(BaseInvocationOutput):
|
||||||
@ -54,7 +65,7 @@ class IPAdapterOutput(BaseInvocationOutput):
|
|||||||
ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")
|
ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")
|
||||||
|
|
||||||
|
|
||||||
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.0")
|
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.1")
|
||||||
class IPAdapterInvocation(BaseInvocation):
|
class IPAdapterInvocation(BaseInvocation):
|
||||||
"""Collects IP-Adapter info to pass to other nodes."""
|
"""Collects IP-Adapter info to pass to other nodes."""
|
||||||
|
|
||||||
@ -64,18 +75,27 @@ class IPAdapterInvocation(BaseInvocation):
|
|||||||
description="The IP-Adapter model.", title="IP-Adapter Model", input=Input.Direct, ui_order=-1
|
description="The IP-Adapter model.", title="IP-Adapter Model", input=Input.Direct, ui_order=-1
|
||||||
)
|
)
|
||||||
|
|
||||||
# weight: float = InputField(default=1.0, description="The weight of the IP-Adapter.", ui_type=UIType.Float)
|
|
||||||
weight: Union[float, List[float]] = InputField(
|
weight: Union[float, List[float]] = InputField(
|
||||||
default=1, ge=-1, description="The weight given to the IP-Adapter", title="Weight"
|
default=1, description="The weight given to the IP-Adapter", title="Weight"
|
||||||
)
|
)
|
||||||
|
|
||||||
begin_step_percent: float = InputField(
|
begin_step_percent: float = InputField(
|
||||||
default=0, ge=-1, le=2, description="When the IP-Adapter is first applied (% of total steps)"
|
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
||||||
)
|
)
|
||||||
end_step_percent: float = InputField(
|
end_step_percent: float = InputField(
|
||||||
default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
|
default=1, ge=0, le=1, description="When the IP-Adapter is last applied (% of total steps)"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@field_validator("weight")
|
||||||
|
@classmethod
|
||||||
|
def validate_ip_adapter_weight(cls, v):
|
||||||
|
validate_weights(v)
|
||||||
|
return v
|
||||||
|
|
||||||
|
@model_validator(mode="after")
|
||||||
|
def validate_begin_end_step_percent(self):
|
||||||
|
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||||
|
return self
|
||||||
|
|
||||||
def invoke(self, context: InvocationContext) -> IPAdapterOutput:
|
def invoke(self, context: InvocationContext) -> IPAdapterOutput:
|
||||||
# Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
|
# Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
|
||||||
ip_adapter_info = context.services.model_manager.model_info(
|
ip_adapter_info = context.services.model_manager.model_info(
|
||||||
|
@ -220,7 +220,7 @@ def get_scheduler(
|
|||||||
title="Denoise Latents",
|
title="Denoise Latents",
|
||||||
tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
|
tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
|
||||||
category="latents",
|
category="latents",
|
||||||
version="1.5.0",
|
version="1.5.1",
|
||||||
)
|
)
|
||||||
class DenoiseLatentsInvocation(BaseInvocation):
|
class DenoiseLatentsInvocation(BaseInvocation):
|
||||||
"""Denoises noisy latents to decodable images"""
|
"""Denoises noisy latents to decodable images"""
|
||||||
@ -279,7 +279,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
|||||||
ui_order=7,
|
ui_order=7,
|
||||||
)
|
)
|
||||||
cfg_rescale_multiplier: float = InputField(
|
cfg_rescale_multiplier: float = InputField(
|
||||||
default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
|
title="CFG Rescale Multiplier", default=0, ge=0, lt=1, description=FieldDescriptions.cfg_rescale_multiplier
|
||||||
)
|
)
|
||||||
latents: Optional[LatentsField] = InputField(
|
latents: Optional[LatentsField] = InputField(
|
||||||
default=None,
|
default=None,
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
from typing import Union
|
from typing import Union
|
||||||
|
|
||||||
from pydantic import BaseModel, ConfigDict, Field
|
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
|
||||||
|
|
||||||
from invokeai.app.invocations.baseinvocation import (
|
from invokeai.app.invocations.baseinvocation import (
|
||||||
BaseInvocation,
|
BaseInvocation,
|
||||||
@ -14,6 +14,7 @@ from invokeai.app.invocations.baseinvocation import (
|
|||||||
)
|
)
|
||||||
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
|
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
|
||||||
from invokeai.app.invocations.primitives import ImageField
|
from invokeai.app.invocations.primitives import ImageField
|
||||||
|
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||||
from invokeai.app.shared.fields import FieldDescriptions
|
from invokeai.app.shared.fields import FieldDescriptions
|
||||||
from invokeai.backend.model_management.models.base import BaseModelType
|
from invokeai.backend.model_management.models.base import BaseModelType
|
||||||
|
|
||||||
@ -37,6 +38,17 @@ class T2IAdapterField(BaseModel):
|
|||||||
)
|
)
|
||||||
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")
|
||||||
|
|
||||||
|
@field_validator("weight")
|
||||||
|
@classmethod
|
||||||
|
def validate_ip_adapter_weight(cls, v):
|
||||||
|
validate_weights(v)
|
||||||
|
return v
|
||||||
|
|
||||||
|
@model_validator(mode="after")
|
||||||
|
def validate_begin_end_step_percent(self):
|
||||||
|
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||||
|
return self
|
||||||
|
|
||||||
|
|
||||||
@invocation_output("t2i_adapter_output")
|
@invocation_output("t2i_adapter_output")
|
||||||
class T2IAdapterOutput(BaseInvocationOutput):
|
class T2IAdapterOutput(BaseInvocationOutput):
|
||||||
@ -44,7 +56,7 @@ class T2IAdapterOutput(BaseInvocationOutput):
|
|||||||
|
|
||||||
|
|
||||||
@invocation(
|
@invocation(
|
||||||
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.0"
|
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.1"
|
||||||
)
|
)
|
||||||
class T2IAdapterInvocation(BaseInvocation):
|
class T2IAdapterInvocation(BaseInvocation):
|
||||||
"""Collects T2I-Adapter info to pass to other nodes."""
|
"""Collects T2I-Adapter info to pass to other nodes."""
|
||||||
@ -61,7 +73,7 @@ class T2IAdapterInvocation(BaseInvocation):
|
|||||||
default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"
|
default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"
|
||||||
)
|
)
|
||||||
begin_step_percent: float = InputField(
|
begin_step_percent: float = InputField(
|
||||||
default=0, ge=-1, le=2, description="When the T2I-Adapter is first applied (% of total steps)"
|
default=0, ge=0, le=1, description="When the T2I-Adapter is first applied (% of total steps)"
|
||||||
)
|
)
|
||||||
end_step_percent: float = InputField(
|
end_step_percent: float = InputField(
|
||||||
default=1, ge=0, le=1, description="When the T2I-Adapter is last applied (% of total steps)"
|
default=1, ge=0, le=1, description="When the T2I-Adapter is last applied (% of total steps)"
|
||||||
@ -71,6 +83,17 @@ class T2IAdapterInvocation(BaseInvocation):
|
|||||||
description="The resize mode applied to the T2I-Adapter input image so that it matches the target output size.",
|
description="The resize mode applied to the T2I-Adapter input image so that it matches the target output size.",
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@field_validator("weight")
|
||||||
|
@classmethod
|
||||||
|
def validate_ip_adapter_weight(cls, v):
|
||||||
|
validate_weights(v)
|
||||||
|
return v
|
||||||
|
|
||||||
|
@model_validator(mode="after")
|
||||||
|
def validate_begin_end_step_percent(self):
|
||||||
|
validate_begin_end_step(self.begin_step_percent, self.end_step_percent)
|
||||||
|
return self
|
||||||
|
|
||||||
def invoke(self, context: InvocationContext) -> T2IAdapterOutput:
|
def invoke(self, context: InvocationContext) -> T2IAdapterOutput:
|
||||||
return T2IAdapterOutput(
|
return T2IAdapterOutput(
|
||||||
t2i_adapter=T2IAdapterField(
|
t2i_adapter=T2IAdapterField(
|
||||||
|
14
invokeai/app/invocations/util.py
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
from typing import Union
|
||||||
|
|
||||||
|
|
||||||
|
def validate_weights(weights: Union[float, list[float]]) -> None:
|
||||||
|
"""Validate that all control weights in the valid range"""
|
||||||
|
to_validate = weights if isinstance(weights, list) else [weights]
|
||||||
|
if any(i < -1 or i > 2 for i in to_validate):
|
||||||
|
raise ValueError("Control weights must be within -1 to 2 range")
|
||||||
|
|
||||||
|
|
||||||
|
def validate_begin_end_step(begin_step_percent: float, end_step_percent: float) -> None:
|
||||||
|
"""Validate that begin_step_percent is less than end_step_percent"""
|
||||||
|
if begin_step_percent >= end_step_percent:
|
||||||
|
raise ValueError("Begin step percent must be less than or equal to end step percent")
|
@ -1,5 +1,7 @@
|
|||||||
"""Init file for InvokeAI configure package."""
|
"""Init file for InvokeAI configure package."""
|
||||||
|
|
||||||
|
from invokeai.app.services.config.config_common import PagingArgumentParser
|
||||||
|
|
||||||
from .config_default import InvokeAIAppConfig, get_invokeai_config
|
from .config_default import InvokeAIAppConfig, get_invokeai_config
|
||||||
|
|
||||||
__all__ = ["InvokeAIAppConfig", "get_invokeai_config"]
|
__all__ = ["InvokeAIAppConfig", "get_invokeai_config", "PagingArgumentParser"]
|
||||||
|
@ -209,7 +209,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
"""Configuration object for InvokeAI App."""
|
"""Configuration object for InvokeAI App."""
|
||||||
|
|
||||||
singleton_config: ClassVar[Optional[InvokeAIAppConfig]] = None
|
singleton_config: ClassVar[Optional[InvokeAIAppConfig]] = None
|
||||||
singleton_init: ClassVar[Optional[Dict]] = None
|
singleton_init: ClassVar[Optional[Dict[str, Any]]] = None
|
||||||
|
|
||||||
# fmt: off
|
# fmt: off
|
||||||
type: Literal["InvokeAI"] = "InvokeAI"
|
type: Literal["InvokeAI"] = "InvokeAI"
|
||||||
@ -263,7 +263,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
|
|
||||||
# DEVICE
|
# DEVICE
|
||||||
device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
|
device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
|
||||||
precision : Literal["auto", "float16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)
|
precision : Literal["auto", "float16", "bfloat16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)
|
||||||
|
|
||||||
# GENERATION
|
# GENERATION
|
||||||
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation)
|
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation)
|
||||||
@ -301,8 +301,8 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
self,
|
self,
|
||||||
argv: Optional[list[str]] = None,
|
argv: Optional[list[str]] = None,
|
||||||
conf: Optional[DictConfig] = None,
|
conf: Optional[DictConfig] = None,
|
||||||
clobber=False,
|
clobber: Optional[bool] = False,
|
||||||
):
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Update settings with contents of init file, environment, and command-line settings.
|
Update settings with contents of init file, environment, and command-line settings.
|
||||||
|
|
||||||
@ -337,7 +337,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
)
|
)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_config(cls, **kwargs: Dict[str, Any]) -> InvokeAIAppConfig:
|
def get_config(cls, **kwargs: Any) -> InvokeAIAppConfig:
|
||||||
"""Return a singleton InvokeAIAppConfig configuration object."""
|
"""Return a singleton InvokeAIAppConfig configuration object."""
|
||||||
if (
|
if (
|
||||||
cls.singleton_config is None
|
cls.singleton_config is None
|
||||||
@ -455,7 +455,7 @@ class InvokeAIAppConfig(InvokeAISettings):
|
|||||||
return _find_root()
|
return _find_root()
|
||||||
|
|
||||||
|
|
||||||
def get_invokeai_config(**kwargs) -> InvokeAIAppConfig:
|
def get_invokeai_config(**kwargs: Any) -> InvokeAIAppConfig:
|
||||||
"""Legacy function which returns InvokeAIAppConfig.get_config()."""
|
"""Legacy function which returns InvokeAIAppConfig.get_config()."""
|
||||||
return InvokeAIAppConfig.get_config(**kwargs)
|
return InvokeAIAppConfig.get_config(**kwargs)
|
||||||
|
|
||||||
|
@ -34,6 +34,7 @@ class ServiceInactiveException(Exception):
|
|||||||
|
|
||||||
|
|
||||||
DownloadEventHandler = Callable[["DownloadJob"], None]
|
DownloadEventHandler = Callable[["DownloadJob"], None]
|
||||||
|
DownloadExceptionHandler = Callable[["DownloadJob", Optional[Exception]], None]
|
||||||
|
|
||||||
|
|
||||||
@total_ordering
|
@total_ordering
|
||||||
@ -55,6 +56,7 @@ class DownloadJob(BaseModel):
|
|||||||
job_ended: Optional[str] = Field(
|
job_ended: Optional[str] = Field(
|
||||||
default=None, description="Timestamp for when the download job ende1d (completed or errored)"
|
default=None, description="Timestamp for when the download job ende1d (completed or errored)"
|
||||||
)
|
)
|
||||||
|
content_type: Optional[str] = Field(default=None, description="Content type of downloaded file")
|
||||||
bytes: int = Field(default=0, description="Bytes downloaded so far")
|
bytes: int = Field(default=0, description="Bytes downloaded so far")
|
||||||
total_bytes: int = Field(default=0, description="Total file size (bytes)")
|
total_bytes: int = Field(default=0, description="Total file size (bytes)")
|
||||||
|
|
||||||
@ -70,7 +72,11 @@ class DownloadJob(BaseModel):
|
|||||||
_on_progress: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
_on_progress: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
||||||
_on_complete: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
_on_complete: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
||||||
_on_cancelled: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
_on_cancelled: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
||||||
_on_error: Optional[DownloadEventHandler] = PrivateAttr(default=None)
|
_on_error: Optional[DownloadExceptionHandler] = PrivateAttr(default=None)
|
||||||
|
|
||||||
|
def __hash__(self) -> int:
|
||||||
|
"""Return hash of the string representation of this object, for indexing."""
|
||||||
|
return hash(str(self))
|
||||||
|
|
||||||
def __le__(self, other: "DownloadJob") -> bool:
|
def __le__(self, other: "DownloadJob") -> bool:
|
||||||
"""Return True if this job's priority is less than another's."""
|
"""Return True if this job's priority is less than another's."""
|
||||||
@ -87,6 +93,26 @@ class DownloadJob(BaseModel):
|
|||||||
"""Call to cancel the job."""
|
"""Call to cancel the job."""
|
||||||
return self._cancelled
|
return self._cancelled
|
||||||
|
|
||||||
|
@property
|
||||||
|
def complete(self) -> bool:
|
||||||
|
"""Return true if job completed without errors."""
|
||||||
|
return self.status == DownloadJobStatus.COMPLETED
|
||||||
|
|
||||||
|
@property
|
||||||
|
def running(self) -> bool:
|
||||||
|
"""Return true if the job is running."""
|
||||||
|
return self.status == DownloadJobStatus.RUNNING
|
||||||
|
|
||||||
|
@property
|
||||||
|
def errored(self) -> bool:
|
||||||
|
"""Return true if the job is errored."""
|
||||||
|
return self.status == DownloadJobStatus.ERROR
|
||||||
|
|
||||||
|
@property
|
||||||
|
def in_terminal_state(self) -> bool:
|
||||||
|
"""Return true if job has finished, one way or another."""
|
||||||
|
return self.status not in [DownloadJobStatus.WAITING, DownloadJobStatus.RUNNING]
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def on_start(self) -> Optional[DownloadEventHandler]:
|
def on_start(self) -> Optional[DownloadEventHandler]:
|
||||||
"""Return the on_start event handler."""
|
"""Return the on_start event handler."""
|
||||||
@ -103,7 +129,7 @@ class DownloadJob(BaseModel):
|
|||||||
return self._on_complete
|
return self._on_complete
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def on_error(self) -> Optional[DownloadEventHandler]:
|
def on_error(self) -> Optional[DownloadExceptionHandler]:
|
||||||
"""Return the on_error event handler."""
|
"""Return the on_error event handler."""
|
||||||
return self._on_error
|
return self._on_error
|
||||||
|
|
||||||
@ -118,7 +144,7 @@ class DownloadJob(BaseModel):
|
|||||||
on_progress: Optional[DownloadEventHandler] = None,
|
on_progress: Optional[DownloadEventHandler] = None,
|
||||||
on_complete: Optional[DownloadEventHandler] = None,
|
on_complete: Optional[DownloadEventHandler] = None,
|
||||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||||
on_error: Optional[DownloadEventHandler] = None,
|
on_error: Optional[DownloadExceptionHandler] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Set the callbacks for download events."""
|
"""Set the callbacks for download events."""
|
||||||
self._on_start = on_start
|
self._on_start = on_start
|
||||||
@ -150,10 +176,10 @@ class DownloadQueueServiceBase(ABC):
|
|||||||
on_progress: Optional[DownloadEventHandler] = None,
|
on_progress: Optional[DownloadEventHandler] = None,
|
||||||
on_complete: Optional[DownloadEventHandler] = None,
|
on_complete: Optional[DownloadEventHandler] = None,
|
||||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||||
on_error: Optional[DownloadEventHandler] = None,
|
on_error: Optional[DownloadExceptionHandler] = None,
|
||||||
) -> DownloadJob:
|
) -> DownloadJob:
|
||||||
"""
|
"""
|
||||||
Create a download job.
|
Create and enqueue download job.
|
||||||
|
|
||||||
:param source: Source of the download as a URL.
|
:param source: Source of the download as a URL.
|
||||||
:param dest: Path to download to. See below.
|
:param dest: Path to download to. See below.
|
||||||
@ -175,6 +201,25 @@ class DownloadQueueServiceBase(ABC):
|
|||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def submit_download_job(
|
||||||
|
self,
|
||||||
|
job: DownloadJob,
|
||||||
|
on_start: Optional[DownloadEventHandler] = None,
|
||||||
|
on_progress: Optional[DownloadEventHandler] = None,
|
||||||
|
on_complete: Optional[DownloadEventHandler] = None,
|
||||||
|
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||||
|
on_error: Optional[DownloadExceptionHandler] = None,
|
||||||
|
) -> None:
|
||||||
|
"""
|
||||||
|
Enqueue a download job.
|
||||||
|
|
||||||
|
:param job: The DownloadJob
|
||||||
|
:param on_start, on_progress, on_complete, on_error: Callbacks for the indicated
|
||||||
|
events.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def list_jobs(self) -> List[DownloadJob]:
|
def list_jobs(self) -> List[DownloadJob]:
|
||||||
"""
|
"""
|
||||||
@ -197,21 +242,21 @@ class DownloadQueueServiceBase(ABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def cancel_all_jobs(self):
|
def cancel_all_jobs(self) -> None:
|
||||||
"""Cancel all active and enquedjobs."""
|
"""Cancel all active and enquedjobs."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def prune_jobs(self):
|
def prune_jobs(self) -> None:
|
||||||
"""Prune completed and errored queue items from the job list."""
|
"""Prune completed and errored queue items from the job list."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def cancel_job(self, job: DownloadJob):
|
def cancel_job(self, job: DownloadJob) -> None:
|
||||||
"""Cancel the job, clearing partial downloads and putting it into ERROR state."""
|
"""Cancel the job, clearing partial downloads and putting it into ERROR state."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def join(self):
|
def join(self) -> None:
|
||||||
"""Wait until all jobs are off the queue."""
|
"""Wait until all jobs are off the queue."""
|
||||||
pass
|
pass
|
||||||
|
@ -5,10 +5,9 @@ import os
|
|||||||
import re
|
import re
|
||||||
import threading
|
import threading
|
||||||
import traceback
|
import traceback
|
||||||
from logging import Logger
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from queue import Empty, PriorityQueue
|
from queue import Empty, PriorityQueue
|
||||||
from typing import Any, Dict, List, Optional, Set
|
from typing import Any, Dict, List, Optional
|
||||||
|
|
||||||
import requests
|
import requests
|
||||||
from pydantic.networks import AnyHttpUrl
|
from pydantic.networks import AnyHttpUrl
|
||||||
@ -21,6 +20,7 @@ from invokeai.backend.util.logging import InvokeAILogger
|
|||||||
|
|
||||||
from .download_base import (
|
from .download_base import (
|
||||||
DownloadEventHandler,
|
DownloadEventHandler,
|
||||||
|
DownloadExceptionHandler,
|
||||||
DownloadJob,
|
DownloadJob,
|
||||||
DownloadJobCancelledException,
|
DownloadJobCancelledException,
|
||||||
DownloadJobStatus,
|
DownloadJobStatus,
|
||||||
@ -36,18 +36,6 @@ DOWNLOAD_CHUNK_SIZE = 100000
|
|||||||
class DownloadQueueService(DownloadQueueServiceBase):
|
class DownloadQueueService(DownloadQueueServiceBase):
|
||||||
"""Class for queued download of models."""
|
"""Class for queued download of models."""
|
||||||
|
|
||||||
_jobs: Dict[int, DownloadJob]
|
|
||||||
_max_parallel_dl: int = 5
|
|
||||||
_worker_pool: Set[threading.Thread]
|
|
||||||
_queue: PriorityQueue[DownloadJob]
|
|
||||||
_stop_event: threading.Event
|
|
||||||
_lock: threading.Lock
|
|
||||||
_logger: Logger
|
|
||||||
_events: Optional[EventServiceBase] = None
|
|
||||||
_next_job_id: int = 0
|
|
||||||
_accept_download_requests: bool = False
|
|
||||||
_requests: requests.sessions.Session
|
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
max_parallel_dl: int = 5,
|
max_parallel_dl: int = 5,
|
||||||
@ -99,6 +87,33 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
self._stop_event.set()
|
self._stop_event.set()
|
||||||
self._worker_pool.clear()
|
self._worker_pool.clear()
|
||||||
|
|
||||||
|
def submit_download_job(
|
||||||
|
self,
|
||||||
|
job: DownloadJob,
|
||||||
|
on_start: Optional[DownloadEventHandler] = None,
|
||||||
|
on_progress: Optional[DownloadEventHandler] = None,
|
||||||
|
on_complete: Optional[DownloadEventHandler] = None,
|
||||||
|
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||||
|
on_error: Optional[DownloadExceptionHandler] = None,
|
||||||
|
) -> None:
|
||||||
|
"""Enqueue a download job."""
|
||||||
|
if not self._accept_download_requests:
|
||||||
|
raise ServiceInactiveException(
|
||||||
|
"The download service is not currently accepting requests. Please call start() to initialize the service."
|
||||||
|
)
|
||||||
|
with self._lock:
|
||||||
|
job.id = self._next_job_id
|
||||||
|
self._next_job_id += 1
|
||||||
|
job.set_callbacks(
|
||||||
|
on_start=on_start,
|
||||||
|
on_progress=on_progress,
|
||||||
|
on_complete=on_complete,
|
||||||
|
on_cancelled=on_cancelled,
|
||||||
|
on_error=on_error,
|
||||||
|
)
|
||||||
|
self._jobs[job.id] = job
|
||||||
|
self._queue.put(job)
|
||||||
|
|
||||||
def download(
|
def download(
|
||||||
self,
|
self,
|
||||||
source: AnyHttpUrl,
|
source: AnyHttpUrl,
|
||||||
@ -109,32 +124,27 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
on_progress: Optional[DownloadEventHandler] = None,
|
on_progress: Optional[DownloadEventHandler] = None,
|
||||||
on_complete: Optional[DownloadEventHandler] = None,
|
on_complete: Optional[DownloadEventHandler] = None,
|
||||||
on_cancelled: Optional[DownloadEventHandler] = None,
|
on_cancelled: Optional[DownloadEventHandler] = None,
|
||||||
on_error: Optional[DownloadEventHandler] = None,
|
on_error: Optional[DownloadExceptionHandler] = None,
|
||||||
) -> DownloadJob:
|
) -> DownloadJob:
|
||||||
"""Create a download job and return its ID."""
|
"""Create and enqueue a download job and return it."""
|
||||||
if not self._accept_download_requests:
|
if not self._accept_download_requests:
|
||||||
raise ServiceInactiveException(
|
raise ServiceInactiveException(
|
||||||
"The download service is not currently accepting requests. Please call start() to initialize the service."
|
"The download service is not currently accepting requests. Please call start() to initialize the service."
|
||||||
)
|
)
|
||||||
with self._lock:
|
job = DownloadJob(
|
||||||
id = self._next_job_id
|
source=source,
|
||||||
self._next_job_id += 1
|
dest=dest,
|
||||||
job = DownloadJob(
|
priority=priority,
|
||||||
id=id,
|
access_token=access_token,
|
||||||
source=source,
|
)
|
||||||
dest=dest,
|
self.submit_download_job(
|
||||||
priority=priority,
|
job,
|
||||||
access_token=access_token,
|
on_start=on_start,
|
||||||
)
|
on_progress=on_progress,
|
||||||
job.set_callbacks(
|
on_complete=on_complete,
|
||||||
on_start=on_start,
|
on_cancelled=on_cancelled,
|
||||||
on_progress=on_progress,
|
on_error=on_error,
|
||||||
on_complete=on_complete,
|
)
|
||||||
on_cancelled=on_cancelled,
|
|
||||||
on_error=on_error,
|
|
||||||
)
|
|
||||||
self._jobs[id] = job
|
|
||||||
self._queue.put(job)
|
|
||||||
return job
|
return job
|
||||||
|
|
||||||
def join(self) -> None:
|
def join(self) -> None:
|
||||||
@ -150,7 +160,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
with self._lock:
|
with self._lock:
|
||||||
to_delete = set()
|
to_delete = set()
|
||||||
for job_id, job in self._jobs.items():
|
for job_id, job in self._jobs.items():
|
||||||
if self._in_terminal_state(job):
|
if job.in_terminal_state:
|
||||||
to_delete.add(job_id)
|
to_delete.add(job_id)
|
||||||
for job_id in to_delete:
|
for job_id in to_delete:
|
||||||
del self._jobs[job_id]
|
del self._jobs[job_id]
|
||||||
@ -172,19 +182,12 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
with self._lock:
|
with self._lock:
|
||||||
job.cancel()
|
job.cancel()
|
||||||
|
|
||||||
def cancel_all_jobs(self, preserve_partial: bool = False) -> None:
|
def cancel_all_jobs(self) -> None:
|
||||||
"""Cancel all jobs (those not in enqueued, running or paused state)."""
|
"""Cancel all jobs (those not in enqueued, running or paused state)."""
|
||||||
for job in self._jobs.values():
|
for job in self._jobs.values():
|
||||||
if not self._in_terminal_state(job):
|
if not job.in_terminal_state:
|
||||||
self.cancel_job(job)
|
self.cancel_job(job)
|
||||||
|
|
||||||
def _in_terminal_state(self, job: DownloadJob) -> bool:
|
|
||||||
return job.status in [
|
|
||||||
DownloadJobStatus.COMPLETED,
|
|
||||||
DownloadJobStatus.CANCELLED,
|
|
||||||
DownloadJobStatus.ERROR,
|
|
||||||
]
|
|
||||||
|
|
||||||
def _start_workers(self, max_workers: int) -> None:
|
def _start_workers(self, max_workers: int) -> None:
|
||||||
"""Start the requested number of worker threads."""
|
"""Start the requested number of worker threads."""
|
||||||
self._stop_event.clear()
|
self._stop_event.clear()
|
||||||
@ -214,7 +217,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
except (OSError, HTTPError) as excp:
|
except (OSError, HTTPError) as excp:
|
||||||
job.error_type = excp.__class__.__name__ + f"({str(excp)})"
|
job.error_type = excp.__class__.__name__ + f"({str(excp)})"
|
||||||
job.error = traceback.format_exc()
|
job.error = traceback.format_exc()
|
||||||
self._signal_job_error(job)
|
self._signal_job_error(job, excp)
|
||||||
except DownloadJobCancelledException:
|
except DownloadJobCancelledException:
|
||||||
self._signal_job_cancelled(job)
|
self._signal_job_cancelled(job)
|
||||||
self._cleanup_cancelled_job(job)
|
self._cleanup_cancelled_job(job)
|
||||||
@ -235,6 +238,8 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
resp = self._requests.get(str(url), headers=header, stream=True)
|
resp = self._requests.get(str(url), headers=header, stream=True)
|
||||||
if not resp.ok:
|
if not resp.ok:
|
||||||
raise HTTPError(resp.reason)
|
raise HTTPError(resp.reason)
|
||||||
|
|
||||||
|
job.content_type = resp.headers.get("Content-Type")
|
||||||
content_length = int(resp.headers.get("content-length", 0))
|
content_length = int(resp.headers.get("content-length", 0))
|
||||||
job.total_bytes = content_length
|
job.total_bytes = content_length
|
||||||
|
|
||||||
@ -296,6 +301,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
self._signal_job_progress(job)
|
self._signal_job_progress(job)
|
||||||
|
|
||||||
# if we get here we are done and can rename the file to the original dest
|
# if we get here we are done and can rename the file to the original dest
|
||||||
|
self._logger.debug(f"{job.source}: saved to {job.download_path} (bytes={job.bytes})")
|
||||||
in_progress_path.rename(job.download_path)
|
in_progress_path.rename(job.download_path)
|
||||||
|
|
||||||
def _validate_filename(self, directory: str, filename: str) -> bool:
|
def _validate_filename(self, directory: str, filename: str) -> bool:
|
||||||
@ -322,7 +328,9 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
try:
|
try:
|
||||||
job.on_start(job)
|
job.on_start(job)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self._logger.error(e)
|
self._logger.error(
|
||||||
|
f"An error occurred while processing the on_start callback: {traceback.format_exception(e)}"
|
||||||
|
)
|
||||||
if self._event_bus:
|
if self._event_bus:
|
||||||
assert job.download_path
|
assert job.download_path
|
||||||
self._event_bus.emit_download_started(str(job.source), job.download_path.as_posix())
|
self._event_bus.emit_download_started(str(job.source), job.download_path.as_posix())
|
||||||
@ -332,7 +340,9 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
try:
|
try:
|
||||||
job.on_progress(job)
|
job.on_progress(job)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self._logger.error(e)
|
self._logger.error(
|
||||||
|
f"An error occurred while processing the on_progress callback: {traceback.format_exception(e)}"
|
||||||
|
)
|
||||||
if self._event_bus:
|
if self._event_bus:
|
||||||
assert job.download_path
|
assert job.download_path
|
||||||
self._event_bus.emit_download_progress(
|
self._event_bus.emit_download_progress(
|
||||||
@ -348,7 +358,9 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
try:
|
try:
|
||||||
job.on_complete(job)
|
job.on_complete(job)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self._logger.error(e)
|
self._logger.error(
|
||||||
|
f"An error occurred while processing the on_complete callback: {traceback.format_exception(e)}"
|
||||||
|
)
|
||||||
if self._event_bus:
|
if self._event_bus:
|
||||||
assert job.download_path
|
assert job.download_path
|
||||||
self._event_bus.emit_download_complete(
|
self._event_bus.emit_download_complete(
|
||||||
@ -356,29 +368,36 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def _signal_job_cancelled(self, job: DownloadJob) -> None:
|
def _signal_job_cancelled(self, job: DownloadJob) -> None:
|
||||||
|
if job.status not in [DownloadJobStatus.RUNNING, DownloadJobStatus.WAITING]:
|
||||||
|
return
|
||||||
job.status = DownloadJobStatus.CANCELLED
|
job.status = DownloadJobStatus.CANCELLED
|
||||||
if job.on_cancelled:
|
if job.on_cancelled:
|
||||||
try:
|
try:
|
||||||
job.on_cancelled(job)
|
job.on_cancelled(job)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self._logger.error(e)
|
self._logger.error(
|
||||||
|
f"An error occurred while processing the on_cancelled callback: {traceback.format_exception(e)}"
|
||||||
|
)
|
||||||
if self._event_bus:
|
if self._event_bus:
|
||||||
self._event_bus.emit_download_cancelled(str(job.source))
|
self._event_bus.emit_download_cancelled(str(job.source))
|
||||||
|
|
||||||
def _signal_job_error(self, job: DownloadJob) -> None:
|
def _signal_job_error(self, job: DownloadJob, excp: Optional[Exception] = None) -> None:
|
||||||
job.status = DownloadJobStatus.ERROR
|
job.status = DownloadJobStatus.ERROR
|
||||||
|
self._logger.error(f"{str(job.source)}: {traceback.format_exception(excp)}")
|
||||||
if job.on_error:
|
if job.on_error:
|
||||||
try:
|
try:
|
||||||
job.on_error(job)
|
job.on_error(job, excp)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self._logger.error(e)
|
self._logger.error(
|
||||||
|
f"An error occurred while processing the on_error callback: {traceback.format_exception(e)}"
|
||||||
|
)
|
||||||
if self._event_bus:
|
if self._event_bus:
|
||||||
assert job.error_type
|
assert job.error_type
|
||||||
assert job.error
|
assert job.error
|
||||||
self._event_bus.emit_download_error(str(job.source), error_type=job.error_type, error=job.error)
|
self._event_bus.emit_download_error(str(job.source), error_type=job.error_type, error=job.error)
|
||||||
|
|
||||||
def _cleanup_cancelled_job(self, job: DownloadJob) -> None:
|
def _cleanup_cancelled_job(self, job: DownloadJob) -> None:
|
||||||
self._logger.warning(f"Cleaning up leftover files from cancelled download job {job.download_path}")
|
self._logger.debug(f"Cleaning up leftover files from cancelled download job {job.download_path}")
|
||||||
try:
|
try:
|
||||||
if job.download_path:
|
if job.download_path:
|
||||||
partial_file = self._in_progress_path(job.download_path)
|
partial_file = self._in_progress_path(job.download_path)
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
|
||||||
from typing import Any, Optional
|
from typing import Any, Dict, List, Optional, Union
|
||||||
|
|
||||||
from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage
|
from invokeai.app.services.invocation_processor.invocation_processor_common import ProgressImage
|
||||||
from invokeai.app.services.session_queue.session_queue_common import (
|
from invokeai.app.services.session_queue.session_queue_common import (
|
||||||
@ -404,53 +404,72 @@ class EventServiceBase:
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
def emit_model_install_started(self, source: str) -> None:
|
def emit_model_install_downloading(
|
||||||
|
self,
|
||||||
|
source: str,
|
||||||
|
local_path: str,
|
||||||
|
bytes: int,
|
||||||
|
total_bytes: int,
|
||||||
|
parts: List[Dict[str, Union[str, int]]],
|
||||||
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Emitted when an install job is started.
|
Emit at intervals while the install job is in progress (remote models only).
|
||||||
|
|
||||||
|
:param source: Source of the model
|
||||||
|
:param local_path: Where model is downloading to
|
||||||
|
:param parts: Progress of downloading URLs that comprise the model, if any.
|
||||||
|
:param bytes: Number of bytes downloaded so far.
|
||||||
|
:param total_bytes: Total size of download, including all files.
|
||||||
|
This emits a Dict with keys "source", "local_path", "bytes" and "total_bytes".
|
||||||
|
"""
|
||||||
|
self.__emit_model_event(
|
||||||
|
event_name="model_install_downloading",
|
||||||
|
payload={
|
||||||
|
"source": source,
|
||||||
|
"local_path": local_path,
|
||||||
|
"bytes": bytes,
|
||||||
|
"total_bytes": total_bytes,
|
||||||
|
"parts": parts,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
def emit_model_install_running(self, source: str) -> None:
|
||||||
|
"""
|
||||||
|
Emit once when an install job becomes active.
|
||||||
|
|
||||||
:param source: Source of the model; local path, repo_id or url
|
:param source: Source of the model; local path, repo_id or url
|
||||||
"""
|
"""
|
||||||
self.__emit_model_event(
|
self.__emit_model_event(
|
||||||
event_name="model_install_started",
|
event_name="model_install_running",
|
||||||
payload={"source": source},
|
payload={"source": source},
|
||||||
)
|
)
|
||||||
|
|
||||||
def emit_model_install_completed(self, source: str, key: str) -> None:
|
def emit_model_install_completed(self, source: str, key: str, total_bytes: Optional[int] = None) -> None:
|
||||||
"""
|
"""
|
||||||
Emitted when an install job is completed successfully.
|
Emit when an install job is completed successfully.
|
||||||
|
|
||||||
:param source: Source of the model; local path, repo_id or url
|
:param source: Source of the model; local path, repo_id or url
|
||||||
:param key: Model config record key
|
:param key: Model config record key
|
||||||
|
:param total_bytes: Size of the model (may be None for installation of a local path)
|
||||||
"""
|
"""
|
||||||
self.__emit_model_event(
|
self.__emit_model_event(
|
||||||
event_name="model_install_completed",
|
event_name="model_install_completed",
|
||||||
payload={
|
payload={
|
||||||
"source": source,
|
"source": source,
|
||||||
|
"total_bytes": total_bytes,
|
||||||
"key": key,
|
"key": key,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
def emit_model_install_progress(
|
def emit_model_install_cancelled(self, source: str) -> None:
|
||||||
self,
|
|
||||||
source: str,
|
|
||||||
current_bytes: int,
|
|
||||||
total_bytes: int,
|
|
||||||
) -> None:
|
|
||||||
"""
|
"""
|
||||||
Emitted while the install job is in progress.
|
Emit when an install job is cancelled.
|
||||||
(Downloaded models only)
|
|
||||||
|
|
||||||
:param source: Source of the model
|
:param source: Source of the model; local path, repo_id or url
|
||||||
:param current_bytes: Number of bytes downloaded so far
|
|
||||||
:param total_bytes: Total bytes to download
|
|
||||||
"""
|
"""
|
||||||
self.__emit_model_event(
|
self.__emit_model_event(
|
||||||
event_name="model_install_progress",
|
event_name="model_install_cancelled",
|
||||||
payload={
|
payload={"source": source},
|
||||||
"source": source,
|
|
||||||
"current_bytes": int,
|
|
||||||
"total_bytes": int,
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def emit_model_install_error(
|
def emit_model_install_error(
|
||||||
@ -460,10 +479,11 @@ class EventServiceBase:
|
|||||||
error: str,
|
error: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Emitted when an install job encounters an exception.
|
Emit when an install job encounters an exception.
|
||||||
|
|
||||||
:param source: Source of the model
|
:param source: Source of the model
|
||||||
:param exception: The exception that raised the error
|
:param error_type: The name of the exception
|
||||||
|
:param error: A text description of the exception
|
||||||
"""
|
"""
|
||||||
self.__emit_model_event(
|
self.__emit_model_event(
|
||||||
event_name="model_install_error",
|
event_name="model_install_error",
|
||||||
|
@ -132,7 +132,6 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
source_node_id=source_node_id,
|
source_node_id=source_node_id,
|
||||||
result=outputs.model_dump(),
|
result=outputs.model_dump(),
|
||||||
)
|
)
|
||||||
self.__invoker.services.performance_statistics.log_stats()
|
|
||||||
|
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
pass
|
pass
|
||||||
@ -195,6 +194,7 @@ class DefaultInvocationProcessor(InvocationProcessorABC):
|
|||||||
error=traceback.format_exc(),
|
error=traceback.format_exc(),
|
||||||
)
|
)
|
||||||
elif is_complete:
|
elif is_complete:
|
||||||
|
self.__invoker.services.performance_statistics.log_stats(graph_execution_state.id)
|
||||||
self.__invoker.services.events.emit_graph_execution_complete(
|
self.__invoker.services.events.emit_graph_execution_complete(
|
||||||
queue_batch_id=queue_item.session_queue_batch_id,
|
queue_batch_id=queue_item.session_queue_batch_id,
|
||||||
queue_item_id=queue_item.session_queue_item_id,
|
queue_item_id=queue_item.session_queue_item_id,
|
||||||
|
@ -30,23 +30,13 @@ writes to the system log is stored in InvocationServices.performance_statistics.
|
|||||||
|
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from contextlib import AbstractContextManager
|
from contextlib import AbstractContextManager
|
||||||
from typing import Dict
|
|
||||||
|
|
||||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||||
from invokeai.backend.model_management.model_cache import CacheStats
|
|
||||||
|
|
||||||
from .invocation_stats_common import NodeLog
|
|
||||||
|
|
||||||
|
|
||||||
class InvocationStatsServiceBase(ABC):
|
class InvocationStatsServiceBase(ABC):
|
||||||
"Abstract base class for recording node memory/time performance statistics"
|
"Abstract base class for recording node memory/time performance statistics"
|
||||||
|
|
||||||
# {graph_id => NodeLog}
|
|
||||||
_stats: Dict[str, NodeLog]
|
|
||||||
_cache_stats: Dict[str, CacheStats]
|
|
||||||
ram_used: float
|
|
||||||
ram_changed: float
|
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
"""
|
"""
|
||||||
@ -77,45 +67,8 @@ class InvocationStatsServiceBase(ABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def reset_all_stats(self):
|
def log_stats(self, graph_execution_state_id: str):
|
||||||
"""Zero all statistics"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def update_invocation_stats(
|
|
||||||
self,
|
|
||||||
graph_id: str,
|
|
||||||
invocation_type: str,
|
|
||||||
time_used: float,
|
|
||||||
vram_used: float,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Add timing information on execution of a node. Usually
|
|
||||||
used internally.
|
|
||||||
:param graph_id: ID of the graph that is currently executing
|
|
||||||
:param invocation_type: String literal type of the node
|
|
||||||
:param time_used: Time used by node's exection (sec)
|
|
||||||
:param vram_used: Maximum VRAM used during exection (GB)
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def log_stats(self):
|
|
||||||
"""
|
"""
|
||||||
Write out the accumulated statistics to the log or somewhere else.
|
Write out the accumulated statistics to the log or somewhere else.
|
||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def update_mem_stats(
|
|
||||||
self,
|
|
||||||
ram_used: float,
|
|
||||||
ram_changed: float,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Update the collector with RAM memory usage info.
|
|
||||||
|
|
||||||
:param ram_used: How much RAM is currently in use.
|
|
||||||
:param ram_changed: How much RAM changed since last generation.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
@ -1,25 +1,84 @@
|
|||||||
from dataclasses import dataclass, field
|
from collections import defaultdict
|
||||||
from typing import Dict
|
from dataclasses import dataclass
|
||||||
|
|
||||||
# size of GIG in bytes
|
|
||||||
GIG = 1073741824
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class NodeStats:
|
class NodeExecutionStats:
|
||||||
"""Class for tracking execution stats of an invocation node"""
|
"""Class for tracking execution stats of an invocation node."""
|
||||||
|
|
||||||
calls: int = 0
|
invocation_type: str
|
||||||
time_used: float = 0.0 # seconds
|
|
||||||
max_vram: float = 0.0 # GB
|
start_time: float # Seconds since the epoch.
|
||||||
cache_hits: int = 0
|
end_time: float # Seconds since the epoch.
|
||||||
cache_misses: int = 0
|
|
||||||
cache_high_watermark: int = 0
|
start_ram_gb: float # GB
|
||||||
|
end_ram_gb: float # GB
|
||||||
|
|
||||||
|
peak_vram_gb: float # GB
|
||||||
|
|
||||||
|
def total_time(self) -> float:
|
||||||
|
return self.end_time - self.start_time
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
class GraphExecutionStats:
|
||||||
class NodeLog:
|
"""Class for tracking execution stats of a graph."""
|
||||||
"""Class for tracking node usage"""
|
|
||||||
|
|
||||||
# {node_type => NodeStats}
|
def __init__(self):
|
||||||
nodes: Dict[str, NodeStats] = field(default_factory=dict)
|
self._node_stats_list: list[NodeExecutionStats] = []
|
||||||
|
|
||||||
|
def add_node_execution_stats(self, node_stats: NodeExecutionStats):
|
||||||
|
self._node_stats_list.append(node_stats)
|
||||||
|
|
||||||
|
def get_total_run_time(self) -> float:
|
||||||
|
"""Get the total time spent executing nodes in the graph."""
|
||||||
|
total = 0.0
|
||||||
|
for node_stats in self._node_stats_list:
|
||||||
|
total += node_stats.total_time()
|
||||||
|
return total
|
||||||
|
|
||||||
|
def get_first_node_stats(self) -> NodeExecutionStats | None:
|
||||||
|
"""Get the stats of the first node in the graph (by start_time)."""
|
||||||
|
first_node = None
|
||||||
|
for node_stats in self._node_stats_list:
|
||||||
|
if first_node is None or node_stats.start_time < first_node.start_time:
|
||||||
|
first_node = node_stats
|
||||||
|
|
||||||
|
assert first_node is not None
|
||||||
|
return first_node
|
||||||
|
|
||||||
|
def get_last_node_stats(self) -> NodeExecutionStats | None:
|
||||||
|
"""Get the stats of the last node in the graph (by end_time)."""
|
||||||
|
last_node = None
|
||||||
|
for node_stats in self._node_stats_list:
|
||||||
|
if last_node is None or node_stats.end_time > last_node.end_time:
|
||||||
|
last_node = node_stats
|
||||||
|
|
||||||
|
return last_node
|
||||||
|
|
||||||
|
def get_pretty_log(self, graph_execution_state_id: str) -> str:
|
||||||
|
log = f"Graph stats: {graph_execution_state_id}\n"
|
||||||
|
log += f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}\n"
|
||||||
|
|
||||||
|
# Log stats aggregated by node type.
|
||||||
|
node_stats_by_type: dict[str, list[NodeExecutionStats]] = defaultdict(list)
|
||||||
|
for node_stats in self._node_stats_list:
|
||||||
|
node_stats_by_type[node_stats.invocation_type].append(node_stats)
|
||||||
|
|
||||||
|
for node_type, node_type_stats_list in node_stats_by_type.items():
|
||||||
|
num_calls = len(node_type_stats_list)
|
||||||
|
time_used = sum([n.total_time() for n in node_type_stats_list])
|
||||||
|
peak_vram = max([n.peak_vram_gb for n in node_type_stats_list])
|
||||||
|
log += f"{node_type:>30} {num_calls:>4} {time_used:7.3f}s {peak_vram:4.3f}G\n"
|
||||||
|
|
||||||
|
# Log stats for the entire graph.
|
||||||
|
log += f"TOTAL GRAPH EXECUTION TIME: {self.get_total_run_time():7.3f}s\n"
|
||||||
|
|
||||||
|
first_node = self.get_first_node_stats()
|
||||||
|
last_node = self.get_last_node_stats()
|
||||||
|
if first_node is not None and last_node is not None:
|
||||||
|
total_wall_time = last_node.end_time - first_node.start_time
|
||||||
|
ram_change = last_node.end_ram_gb - first_node.start_ram_gb
|
||||||
|
log += f"TOTAL GRAPH WALL TIME: {total_wall_time:7.3f}s\n"
|
||||||
|
log += f"RAM used by InvokeAI process: {last_node.end_ram_gb:4.2f}G ({ram_change:+5.3f}G)\n"
|
||||||
|
|
||||||
|
return log
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
import time
|
import time
|
||||||
from typing import Dict
|
from contextlib import contextmanager
|
||||||
|
|
||||||
import psutil
|
import psutil
|
||||||
import torch
|
import torch
|
||||||
@ -7,161 +7,119 @@ import torch
|
|||||||
import invokeai.backend.util.logging as logger
|
import invokeai.backend.util.logging as logger
|
||||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||||
from invokeai.app.services.invoker import Invoker
|
from invokeai.app.services.invoker import Invoker
|
||||||
from invokeai.app.services.model_manager.model_manager_base import ModelManagerServiceBase
|
|
||||||
from invokeai.backend.model_management.model_cache import CacheStats
|
from invokeai.backend.model_management.model_cache import CacheStats
|
||||||
|
|
||||||
from .invocation_stats_base import InvocationStatsServiceBase
|
from .invocation_stats_base import InvocationStatsServiceBase
|
||||||
from .invocation_stats_common import GIG, NodeLog, NodeStats
|
from .invocation_stats_common import GraphExecutionStats, NodeExecutionStats
|
||||||
|
|
||||||
|
# Size of 1GB in bytes.
|
||||||
|
GB = 2**30
|
||||||
|
|
||||||
|
|
||||||
class InvocationStatsService(InvocationStatsServiceBase):
|
class InvocationStatsService(InvocationStatsServiceBase):
|
||||||
"""Accumulate performance information about a running graph. Collects time spent in each node,
|
"""Accumulate performance information about a running graph. Collects time spent in each node,
|
||||||
as well as the maximum and current VRAM utilisation for CUDA systems"""
|
as well as the maximum and current VRAM utilisation for CUDA systems"""
|
||||||
|
|
||||||
_invoker: Invoker
|
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
# {graph_id => NodeLog}
|
# Maps graph_execution_state_id to GraphExecutionStats.
|
||||||
self._stats: Dict[str, NodeLog] = {}
|
self._stats: dict[str, GraphExecutionStats] = {}
|
||||||
self._cache_stats: Dict[str, CacheStats] = {}
|
# Maps graph_execution_state_id to model manager CacheStats.
|
||||||
self.ram_used: float = 0.0
|
self._cache_stats: dict[str, CacheStats] = {}
|
||||||
self.ram_changed: float = 0.0
|
|
||||||
|
|
||||||
def start(self, invoker: Invoker) -> None:
|
def start(self, invoker: Invoker) -> None:
|
||||||
self._invoker = invoker
|
self._invoker = invoker
|
||||||
|
|
||||||
class StatsContext:
|
@contextmanager
|
||||||
"""Context manager for collecting statistics."""
|
def collect_stats(self, invocation: BaseInvocation, graph_execution_state_id: str):
|
||||||
|
if not self._stats.get(graph_execution_state_id):
|
||||||
invocation: BaseInvocation
|
# First time we're seeing this graph_execution_state_id.
|
||||||
collector: "InvocationStatsServiceBase"
|
self._stats[graph_execution_state_id] = GraphExecutionStats()
|
||||||
graph_id: str
|
|
||||||
start_time: float
|
|
||||||
ram_used: int
|
|
||||||
model_manager: ModelManagerServiceBase
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
invocation: BaseInvocation,
|
|
||||||
graph_id: str,
|
|
||||||
model_manager: ModelManagerServiceBase,
|
|
||||||
collector: "InvocationStatsServiceBase",
|
|
||||||
):
|
|
||||||
"""Initialize statistics for this run."""
|
|
||||||
self.invocation = invocation
|
|
||||||
self.collector = collector
|
|
||||||
self.graph_id = graph_id
|
|
||||||
self.start_time = 0.0
|
|
||||||
self.ram_used = 0
|
|
||||||
self.model_manager = model_manager
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
self.start_time = time.time()
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
torch.cuda.reset_peak_memory_stats()
|
|
||||||
self.ram_used = psutil.Process().memory_info().rss
|
|
||||||
if self.model_manager:
|
|
||||||
self.model_manager.collect_cache_stats(self.collector._cache_stats[self.graph_id])
|
|
||||||
|
|
||||||
def __exit__(self, *args):
|
|
||||||
"""Called on exit from the context."""
|
|
||||||
ram_used = psutil.Process().memory_info().rss
|
|
||||||
self.collector.update_mem_stats(
|
|
||||||
ram_used=ram_used / GIG,
|
|
||||||
ram_changed=(ram_used - self.ram_used) / GIG,
|
|
||||||
)
|
|
||||||
self.collector.update_invocation_stats(
|
|
||||||
graph_id=self.graph_id,
|
|
||||||
invocation_type=self.invocation.type, # type: ignore # `type` is not on the `BaseInvocation` model, but *is* on all invocations
|
|
||||||
time_used=time.time() - self.start_time,
|
|
||||||
vram_used=torch.cuda.max_memory_allocated() / GIG if torch.cuda.is_available() else 0.0,
|
|
||||||
)
|
|
||||||
|
|
||||||
def collect_stats(
|
|
||||||
self,
|
|
||||||
invocation: BaseInvocation,
|
|
||||||
graph_execution_state_id: str,
|
|
||||||
) -> StatsContext:
|
|
||||||
if not self._stats.get(graph_execution_state_id): # first time we're seeing this
|
|
||||||
self._stats[graph_execution_state_id] = NodeLog()
|
|
||||||
self._cache_stats[graph_execution_state_id] = CacheStats()
|
self._cache_stats[graph_execution_state_id] = CacheStats()
|
||||||
return self.StatsContext(invocation, graph_execution_state_id, self._invoker.services.model_manager, self)
|
|
||||||
|
|
||||||
def reset_all_stats(self):
|
# Prune stale stats. There should be none since we're starting a new graph, but just in case.
|
||||||
"""Zero all statistics"""
|
self._prune_stale_stats()
|
||||||
self._stats = {}
|
|
||||||
|
# Record state before the invocation.
|
||||||
|
start_time = time.time()
|
||||||
|
start_ram = psutil.Process().memory_info().rss
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
torch.cuda.reset_peak_memory_stats()
|
||||||
|
if self._invoker.services.model_manager:
|
||||||
|
self._invoker.services.model_manager.collect_cache_stats(self._cache_stats[graph_execution_state_id])
|
||||||
|
|
||||||
def reset_stats(self, graph_execution_id: str):
|
|
||||||
try:
|
try:
|
||||||
self._stats.pop(graph_execution_id)
|
# Let the invocation run.
|
||||||
except KeyError:
|
yield None
|
||||||
logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_id}")
|
finally:
|
||||||
|
# Record state after the invocation.
|
||||||
|
node_stats = NodeExecutionStats(
|
||||||
|
invocation_type=invocation.type,
|
||||||
|
start_time=start_time,
|
||||||
|
end_time=time.time(),
|
||||||
|
start_ram_gb=start_ram / GB,
|
||||||
|
end_ram_gb=psutil.Process().memory_info().rss / GB,
|
||||||
|
peak_vram_gb=torch.cuda.max_memory_allocated() / GB if torch.cuda.is_available() else 0.0,
|
||||||
|
)
|
||||||
|
self._stats[graph_execution_state_id].add_node_execution_stats(node_stats)
|
||||||
|
|
||||||
def update_mem_stats(
|
def _prune_stale_stats(self):
|
||||||
self,
|
"""Check all graphs being tracked and prune any that have completed/errored.
|
||||||
ram_used: float,
|
|
||||||
ram_changed: float,
|
|
||||||
):
|
|
||||||
self.ram_used = ram_used
|
|
||||||
self.ram_changed = ram_changed
|
|
||||||
|
|
||||||
def update_invocation_stats(
|
This shouldn't be necessary, but we don't have totally robust upstream handling of graph completions/errors, so
|
||||||
self,
|
for now we call this function periodically to prevent them from accumulating.
|
||||||
graph_id: str,
|
"""
|
||||||
invocation_type: str,
|
to_prune = []
|
||||||
time_used: float,
|
for graph_execution_state_id in self._stats:
|
||||||
vram_used: float,
|
|
||||||
):
|
|
||||||
if not self._stats[graph_id].nodes.get(invocation_type):
|
|
||||||
self._stats[graph_id].nodes[invocation_type] = NodeStats()
|
|
||||||
stats = self._stats[graph_id].nodes[invocation_type]
|
|
||||||
stats.calls += 1
|
|
||||||
stats.time_used += time_used
|
|
||||||
stats.max_vram = max(stats.max_vram, vram_used)
|
|
||||||
|
|
||||||
def log_stats(self):
|
|
||||||
completed = set()
|
|
||||||
errored = set()
|
|
||||||
for graph_id, _node_log in self._stats.items():
|
|
||||||
try:
|
try:
|
||||||
current_graph_state = self._invoker.services.graph_execution_manager.get(graph_id)
|
graph_execution_state = self._invoker.services.graph_execution_manager.get(graph_execution_state_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
errored.add(graph_id)
|
# TODO(ryand): What would cause this? Should this exception just be allowed to propagate?
|
||||||
|
logger.warning(f"Failed to get graph state for {graph_execution_state_id}.")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not current_graph_state.is_complete():
|
if not graph_execution_state.is_complete():
|
||||||
|
# The graph is still running, don't prune it.
|
||||||
continue
|
continue
|
||||||
|
|
||||||
total_time = 0
|
to_prune.append(graph_execution_state_id)
|
||||||
logger.info(f"Graph stats: {graph_id}")
|
|
||||||
logger.info(f"{'Node':>30} {'Calls':>7}{'Seconds':>9} {'VRAM Used':>10}")
|
|
||||||
for node_type, stats in self._stats[graph_id].nodes.items():
|
|
||||||
logger.info(f"{node_type:>30} {stats.calls:>4} {stats.time_used:7.3f}s {stats.max_vram:4.3f}G")
|
|
||||||
total_time += stats.time_used
|
|
||||||
|
|
||||||
cache_stats = self._cache_stats[graph_id]
|
for graph_execution_state_id in to_prune:
|
||||||
hwm = cache_stats.high_watermark / GIG
|
del self._stats[graph_execution_state_id]
|
||||||
tot = cache_stats.cache_size / GIG
|
del self._cache_stats[graph_execution_state_id]
|
||||||
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GIG
|
|
||||||
|
|
||||||
logger.info(f"TOTAL GRAPH EXECUTION TIME: {total_time:7.3f}s")
|
if len(to_prune) > 0:
|
||||||
logger.info("RAM used by InvokeAI process: " + "%4.2fG" % self.ram_used + f" ({self.ram_changed:+5.3f}G)")
|
logger.info(f"Pruned stale graph stats for {to_prune}.")
|
||||||
logger.info(f"RAM used to load models: {loaded:4.2f}G")
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
logger.info("VRAM in use: " + "%4.3fG" % (torch.cuda.memory_allocated() / GIG))
|
|
||||||
logger.info("RAM cache statistics:")
|
|
||||||
logger.info(f" Model cache hits: {cache_stats.hits}")
|
|
||||||
logger.info(f" Model cache misses: {cache_stats.misses}")
|
|
||||||
logger.info(f" Models cached: {cache_stats.in_cache}")
|
|
||||||
logger.info(f" Models cleared from cache: {cache_stats.cleared}")
|
|
||||||
logger.info(f" Cache high water mark: {hwm:4.2f}/{tot:4.2f}G")
|
|
||||||
|
|
||||||
completed.add(graph_id)
|
def reset_stats(self, graph_execution_state_id: str):
|
||||||
|
try:
|
||||||
|
del self._stats[graph_execution_state_id]
|
||||||
|
del self._cache_stats[graph_execution_state_id]
|
||||||
|
except KeyError as e:
|
||||||
|
logger.warning(f"Attempted to clear statistics for unknown graph {graph_execution_state_id}: {e}.")
|
||||||
|
|
||||||
for graph_id in completed:
|
def log_stats(self, graph_execution_state_id: str):
|
||||||
del self._stats[graph_id]
|
try:
|
||||||
del self._cache_stats[graph_id]
|
graph_stats = self._stats[graph_execution_state_id]
|
||||||
|
cache_stats = self._cache_stats[graph_execution_state_id]
|
||||||
|
except KeyError as e:
|
||||||
|
logger.warning(f"Attempted to log statistics for unknown graph {graph_execution_state_id}: {e}.")
|
||||||
|
return
|
||||||
|
|
||||||
for graph_id in errored:
|
log = graph_stats.get_pretty_log(graph_execution_state_id)
|
||||||
del self._stats[graph_id]
|
|
||||||
del self._cache_stats[graph_id]
|
hwm = cache_stats.high_watermark / GB
|
||||||
|
tot = cache_stats.cache_size / GB
|
||||||
|
loaded = sum(list(cache_stats.loaded_model_sizes.values())) / GB
|
||||||
|
log += f"RAM used to load models: {loaded:4.2f}G\n"
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
log += f"VRAM in use: {(torch.cuda.memory_allocated() / GB):4.3f}G\n"
|
||||||
|
log += "RAM cache statistics:\n"
|
||||||
|
log += f" Model cache hits: {cache_stats.hits}\n"
|
||||||
|
log += f" Model cache misses: {cache_stats.misses}\n"
|
||||||
|
log += f" Models cached: {cache_stats.in_cache}\n"
|
||||||
|
log += f" Models cleared from cache: {cache_stats.cleared}\n"
|
||||||
|
log += f" Cache high water mark: {hwm:4.2f}/{tot:4.2f}G\n"
|
||||||
|
logger.info(log)
|
||||||
|
|
||||||
|
del self._stats[graph_execution_state_id]
|
||||||
|
del self._cache_stats[graph_execution_state_id]
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
"""Initialization file for model install service package."""
|
"""Initialization file for model install service package."""
|
||||||
|
|
||||||
from .model_install_base import (
|
from .model_install_base import (
|
||||||
|
CivitaiModelSource,
|
||||||
HFModelSource,
|
HFModelSource,
|
||||||
InstallStatus,
|
InstallStatus,
|
||||||
LocalModelSource,
|
LocalModelSource,
|
||||||
@ -22,4 +23,5 @@ __all__ = [
|
|||||||
"LocalModelSource",
|
"LocalModelSource",
|
||||||
"HFModelSource",
|
"HFModelSource",
|
||||||
"URLModelSource",
|
"URLModelSource",
|
||||||
|
"CivitaiModelSource",
|
||||||
]
|
]
|
||||||
|
@ -1,27 +1,42 @@
|
|||||||
|
# Copyright 2023 Lincoln D. Stein and the InvokeAI development team
|
||||||
|
"""Baseclass definitions for the model installer."""
|
||||||
|
|
||||||
import re
|
import re
|
||||||
import traceback
|
import traceback
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, Dict, List, Literal, Optional, Union
|
from typing import Any, Dict, List, Literal, Optional, Set, Union
|
||||||
|
|
||||||
from pydantic import BaseModel, Field, field_validator
|
from pydantic import BaseModel, Field, PrivateAttr, field_validator
|
||||||
from pydantic.networks import AnyHttpUrl
|
from pydantic.networks import AnyHttpUrl
|
||||||
from typing_extensions import Annotated
|
from typing_extensions import Annotated
|
||||||
|
|
||||||
from invokeai.app.services.config import InvokeAIAppConfig
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase
|
||||||
from invokeai.app.services.events import EventServiceBase
|
from invokeai.app.services.events import EventServiceBase
|
||||||
|
from invokeai.app.services.invoker import Invoker
|
||||||
from invokeai.app.services.model_records import ModelRecordServiceBase
|
from invokeai.app.services.model_records import ModelRecordServiceBase
|
||||||
from invokeai.backend.model_manager import AnyModelConfig
|
from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
|
||||||
|
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore
|
||||||
|
|
||||||
|
|
||||||
class InstallStatus(str, Enum):
|
class InstallStatus(str, Enum):
|
||||||
"""State of an install job running in the background."""
|
"""State of an install job running in the background."""
|
||||||
|
|
||||||
WAITING = "waiting" # waiting to be dequeued
|
WAITING = "waiting" # waiting to be dequeued
|
||||||
|
DOWNLOADING = "downloading" # downloading of model files in process
|
||||||
RUNNING = "running" # being processed
|
RUNNING = "running" # being processed
|
||||||
COMPLETED = "completed" # finished running
|
COMPLETED = "completed" # finished running
|
||||||
ERROR = "error" # terminated with an error message
|
ERROR = "error" # terminated with an error message
|
||||||
|
CANCELLED = "cancelled" # terminated with an error message
|
||||||
|
|
||||||
|
|
||||||
|
class ModelInstallPart(BaseModel):
|
||||||
|
url: AnyHttpUrl
|
||||||
|
path: Path
|
||||||
|
bytes: int = 0
|
||||||
|
total_bytes: int = 0
|
||||||
|
|
||||||
|
|
||||||
class UnknownInstallJobException(Exception):
|
class UnknownInstallJobException(Exception):
|
||||||
@ -74,12 +89,31 @@ class LocalModelSource(StringLikeSource):
|
|||||||
return Path(self.path).as_posix()
|
return Path(self.path).as_posix()
|
||||||
|
|
||||||
|
|
||||||
|
class CivitaiModelSource(StringLikeSource):
|
||||||
|
"""A Civitai version id, with optional variant and access token."""
|
||||||
|
|
||||||
|
version_id: int
|
||||||
|
variant: Optional[ModelRepoVariant] = None
|
||||||
|
access_token: Optional[str] = None
|
||||||
|
type: Literal["civitai"] = "civitai"
|
||||||
|
|
||||||
|
def __str__(self) -> str:
|
||||||
|
"""Return string version of repoid when string rep needed."""
|
||||||
|
base: str = str(self.version_id)
|
||||||
|
base += f" ({self.variant})" if self.variant else ""
|
||||||
|
return base
|
||||||
|
|
||||||
|
|
||||||
class HFModelSource(StringLikeSource):
|
class HFModelSource(StringLikeSource):
|
||||||
"""A HuggingFace repo_id, with optional variant and sub-folder."""
|
"""
|
||||||
|
A HuggingFace repo_id with optional variant, sub-folder and access token.
|
||||||
|
Note that the variant option, if not provided to the constructor, will default to fp16, which is
|
||||||
|
what people (almost) always want.
|
||||||
|
"""
|
||||||
|
|
||||||
repo_id: str
|
repo_id: str
|
||||||
variant: Optional[str] = None
|
variant: Optional[ModelRepoVariant] = ModelRepoVariant.FP16
|
||||||
subfolder: Optional[str | Path] = None
|
subfolder: Optional[Path] = None
|
||||||
access_token: Optional[str] = None
|
access_token: Optional[str] = None
|
||||||
type: Literal["hf"] = "hf"
|
type: Literal["hf"] = "hf"
|
||||||
|
|
||||||
@ -103,19 +137,22 @@ class URLModelSource(StringLikeSource):
|
|||||||
|
|
||||||
url: AnyHttpUrl
|
url: AnyHttpUrl
|
||||||
access_token: Optional[str] = None
|
access_token: Optional[str] = None
|
||||||
type: Literal["generic_url"] = "generic_url"
|
type: Literal["url"] = "url"
|
||||||
|
|
||||||
def __str__(self) -> str:
|
def __str__(self) -> str:
|
||||||
"""Return string version of the url when string rep needed."""
|
"""Return string version of the url when string rep needed."""
|
||||||
return str(self.url)
|
return str(self.url)
|
||||||
|
|
||||||
|
|
||||||
ModelSource = Annotated[Union[LocalModelSource, HFModelSource, URLModelSource], Field(discriminator="type")]
|
ModelSource = Annotated[
|
||||||
|
Union[LocalModelSource, HFModelSource, CivitaiModelSource, URLModelSource], Field(discriminator="type")
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
class ModelInstallJob(BaseModel):
|
class ModelInstallJob(BaseModel):
|
||||||
"""Object that tracks the current status of an install request."""
|
"""Object that tracks the current status of an install request."""
|
||||||
|
|
||||||
|
id: int = Field(description="Unique ID for this job")
|
||||||
status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
|
status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
|
||||||
config_in: Dict[str, Any] = Field(
|
config_in: Dict[str, Any] = Field(
|
||||||
default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
|
default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
|
||||||
@ -128,15 +165,74 @@ class ModelInstallJob(BaseModel):
|
|||||||
)
|
)
|
||||||
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
|
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
|
||||||
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
|
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
|
||||||
error_type: Optional[str] = Field(default=None, description="Class name of the exception that led to status==ERROR")
|
bytes: Optional[int] = Field(
|
||||||
error: Optional[str] = Field(default=None, description="Error traceback") # noqa #501
|
default=None, description="For a remote model, the number of bytes downloaded so far (may not be available)"
|
||||||
|
)
|
||||||
|
total_bytes: int = Field(default=0, description="Total size of the model to be installed")
|
||||||
|
source_metadata: Optional[AnyModelRepoMetadata] = Field(
|
||||||
|
default=None, description="Metadata provided by the model source"
|
||||||
|
)
|
||||||
|
download_parts: Set[DownloadJob] = Field(
|
||||||
|
default_factory=set, description="Download jobs contributing to this install"
|
||||||
|
)
|
||||||
|
# internal flags and transitory settings
|
||||||
|
_install_tmpdir: Optional[Path] = PrivateAttr(default=None)
|
||||||
|
_exception: Optional[Exception] = PrivateAttr(default=None)
|
||||||
|
|
||||||
def set_error(self, e: Exception) -> None:
|
def set_error(self, e: Exception) -> None:
|
||||||
"""Record the error and traceback from an exception."""
|
"""Record the error and traceback from an exception."""
|
||||||
self.error_type = e.__class__.__name__
|
self._exception = e
|
||||||
self.error = "".join(traceback.format_exception(e))
|
|
||||||
self.status = InstallStatus.ERROR
|
self.status = InstallStatus.ERROR
|
||||||
|
|
||||||
|
def cancel(self) -> None:
|
||||||
|
"""Call to cancel the job."""
|
||||||
|
self.status = InstallStatus.CANCELLED
|
||||||
|
|
||||||
|
@property
|
||||||
|
def error_type(self) -> Optional[str]:
|
||||||
|
"""Class name of the exception that led to status==ERROR."""
|
||||||
|
return self._exception.__class__.__name__ if self._exception else None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def error(self) -> Optional[str]:
|
||||||
|
"""Error traceback."""
|
||||||
|
return "".join(traceback.format_exception(self._exception)) if self._exception else None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def cancelled(self) -> bool:
|
||||||
|
"""Set status to CANCELLED."""
|
||||||
|
return self.status == InstallStatus.CANCELLED
|
||||||
|
|
||||||
|
@property
|
||||||
|
def errored(self) -> bool:
|
||||||
|
"""Return true if job has errored."""
|
||||||
|
return self.status == InstallStatus.ERROR
|
||||||
|
|
||||||
|
@property
|
||||||
|
def waiting(self) -> bool:
|
||||||
|
"""Return true if job is waiting to run."""
|
||||||
|
return self.status == InstallStatus.WAITING
|
||||||
|
|
||||||
|
@property
|
||||||
|
def downloading(self) -> bool:
|
||||||
|
"""Return true if job is downloading."""
|
||||||
|
return self.status == InstallStatus.DOWNLOADING
|
||||||
|
|
||||||
|
@property
|
||||||
|
def running(self) -> bool:
|
||||||
|
"""Return true if job is running."""
|
||||||
|
return self.status == InstallStatus.RUNNING
|
||||||
|
|
||||||
|
@property
|
||||||
|
def complete(self) -> bool:
|
||||||
|
"""Return true if job completed without errors."""
|
||||||
|
return self.status == InstallStatus.COMPLETED
|
||||||
|
|
||||||
|
@property
|
||||||
|
def in_terminal_state(self) -> bool:
|
||||||
|
"""Return true if job is in a terminal state."""
|
||||||
|
return self.status in [InstallStatus.COMPLETED, InstallStatus.ERROR, InstallStatus.CANCELLED]
|
||||||
|
|
||||||
|
|
||||||
class ModelInstallServiceBase(ABC):
|
class ModelInstallServiceBase(ABC):
|
||||||
"""Abstract base class for InvokeAI model installation."""
|
"""Abstract base class for InvokeAI model installation."""
|
||||||
@ -146,6 +242,8 @@ class ModelInstallServiceBase(ABC):
|
|||||||
self,
|
self,
|
||||||
app_config: InvokeAIAppConfig,
|
app_config: InvokeAIAppConfig,
|
||||||
record_store: ModelRecordServiceBase,
|
record_store: ModelRecordServiceBase,
|
||||||
|
download_queue: DownloadQueueServiceBase,
|
||||||
|
metadata_store: ModelMetadataStore,
|
||||||
event_bus: Optional["EventServiceBase"] = None,
|
event_bus: Optional["EventServiceBase"] = None,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
@ -156,12 +254,14 @@ class ModelInstallServiceBase(ABC):
|
|||||||
:param event_bus: InvokeAI event bus for reporting events to.
|
:param event_bus: InvokeAI event bus for reporting events to.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# make the invoker optional here because we don't need it and it
|
||||||
|
# makes the installer harder to use outside the web app
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def start(self, *args: Any, **kwarg: Any) -> None:
|
def start(self, invoker: Optional[Invoker] = None) -> None:
|
||||||
"""Start the installer service."""
|
"""Start the installer service."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def stop(self, *args: Any, **kwarg: Any) -> None:
|
def stop(self, invoker: Optional[Invoker] = None) -> None:
|
||||||
"""Stop the model install service. After this the objection can be safely deleted."""
|
"""Stop the model install service. After this the objection can be safely deleted."""
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@ -264,9 +364,13 @@ class ModelInstallServiceBase(ABC):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def get_job(self, source: ModelSource) -> List[ModelInstallJob]:
|
def get_job_by_source(self, source: ModelSource) -> List[ModelInstallJob]:
|
||||||
"""Return the ModelInstallJob(s) corresponding to the provided source."""
|
"""Return the ModelInstallJob(s) corresponding to the provided source."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_job_by_id(self, id: int) -> ModelInstallJob:
|
||||||
|
"""Return the ModelInstallJob corresponding to the provided id. Raises ValueError if no job has that ID."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
|
def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
|
||||||
"""
|
"""
|
||||||
@ -278,16 +382,19 @@ class ModelInstallServiceBase(ABC):
|
|||||||
"""Prune all completed and errored jobs."""
|
"""Prune all completed and errored jobs."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def wait_for_installs(self) -> List[ModelInstallJob]:
|
def cancel_job(self, job: ModelInstallJob) -> None:
|
||||||
|
"""Cancel the indicated job."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def wait_for_installs(self, timeout: int = 0) -> List[ModelInstallJob]:
|
||||||
"""
|
"""
|
||||||
Wait for all pending installs to complete.
|
Wait for all pending installs to complete.
|
||||||
|
|
||||||
This will block until all pending installs have
|
This will block until all pending installs have
|
||||||
completed, been cancelled, or errored out. It will
|
completed, been cancelled, or errored out.
|
||||||
block indefinitely if one or more jobs are in the
|
|
||||||
paused state.
|
|
||||||
|
|
||||||
It will return the current list of jobs.
|
:param timeout: Wait up to indicated number of seconds. Raise an Exception('timeout') if
|
||||||
|
installs do not complete within the indicated time.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
|
@ -1,60 +1,72 @@
|
|||||||
"""Model installation class."""
|
"""Model installation class."""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
import threading
|
import threading
|
||||||
|
import time
|
||||||
from hashlib import sha256
|
from hashlib import sha256
|
||||||
from logging import Logger
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from queue import Queue
|
from queue import Empty, Queue
|
||||||
from random import randbytes
|
from random import randbytes
|
||||||
from shutil import copyfile, copytree, move, rmtree
|
from shutil import copyfile, copytree, move, rmtree
|
||||||
|
from tempfile import mkdtemp
|
||||||
from typing import Any, Dict, List, Optional, Set, Union
|
from typing import Any, Dict, List, Optional, Set, Union
|
||||||
|
|
||||||
|
from huggingface_hub import HfFolder
|
||||||
|
from pydantic.networks import AnyHttpUrl
|
||||||
|
from requests import Session
|
||||||
|
|
||||||
from invokeai.app.services.config import InvokeAIAppConfig
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
from invokeai.app.services.events import EventServiceBase
|
from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase
|
||||||
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase, UnknownModelException
|
from invokeai.app.services.events.events_base import EventServiceBase
|
||||||
|
from invokeai.app.services.invoker import Invoker
|
||||||
|
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase, ModelRecordServiceSQL
|
||||||
from invokeai.backend.model_manager.config import (
|
from invokeai.backend.model_manager.config import (
|
||||||
AnyModelConfig,
|
AnyModelConfig,
|
||||||
BaseModelType,
|
BaseModelType,
|
||||||
InvalidModelConfigException,
|
InvalidModelConfigException,
|
||||||
|
ModelRepoVariant,
|
||||||
ModelType,
|
ModelType,
|
||||||
)
|
)
|
||||||
from invokeai.backend.model_manager.hash import FastModelHash
|
from invokeai.backend.model_manager.hash import FastModelHash
|
||||||
|
from invokeai.backend.model_manager.metadata import (
|
||||||
|
AnyModelRepoMetadata,
|
||||||
|
CivitaiMetadataFetch,
|
||||||
|
HuggingFaceMetadataFetch,
|
||||||
|
ModelMetadataStore,
|
||||||
|
ModelMetadataWithFiles,
|
||||||
|
RemoteModelFile,
|
||||||
|
)
|
||||||
from invokeai.backend.model_manager.probe import ModelProbe
|
from invokeai.backend.model_manager.probe import ModelProbe
|
||||||
from invokeai.backend.model_manager.search import ModelSearch
|
from invokeai.backend.model_manager.search import ModelSearch
|
||||||
from invokeai.backend.util import Chdir, InvokeAILogger
|
from invokeai.backend.util import Chdir, InvokeAILogger
|
||||||
|
from invokeai.backend.util.devices import choose_precision, choose_torch_device
|
||||||
|
|
||||||
from .model_install_base import (
|
from .model_install_base import (
|
||||||
|
CivitaiModelSource,
|
||||||
|
HFModelSource,
|
||||||
InstallStatus,
|
InstallStatus,
|
||||||
LocalModelSource,
|
LocalModelSource,
|
||||||
ModelInstallJob,
|
ModelInstallJob,
|
||||||
ModelInstallServiceBase,
|
ModelInstallServiceBase,
|
||||||
ModelSource,
|
ModelSource,
|
||||||
|
URLModelSource,
|
||||||
)
|
)
|
||||||
|
|
||||||
# marker that the queue is done and that thread should exit
|
TMPDIR_PREFIX = "tmpinstall_"
|
||||||
STOP_JOB = ModelInstallJob(
|
|
||||||
source=LocalModelSource(path="stop"),
|
|
||||||
local_path=Path("/dev/null"),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class ModelInstallService(ModelInstallServiceBase):
|
class ModelInstallService(ModelInstallServiceBase):
|
||||||
"""class for InvokeAI model installation."""
|
"""class for InvokeAI model installation."""
|
||||||
|
|
||||||
_app_config: InvokeAIAppConfig
|
|
||||||
_record_store: ModelRecordServiceBase
|
|
||||||
_event_bus: Optional[EventServiceBase] = None
|
|
||||||
_install_queue: Queue[ModelInstallJob]
|
|
||||||
_install_jobs: List[ModelInstallJob]
|
|
||||||
_logger: Logger
|
|
||||||
_cached_model_paths: Set[Path]
|
|
||||||
_models_installed: Set[str]
|
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
app_config: InvokeAIAppConfig,
|
app_config: InvokeAIAppConfig,
|
||||||
record_store: ModelRecordServiceBase,
|
record_store: ModelRecordServiceBase,
|
||||||
|
download_queue: DownloadQueueServiceBase,
|
||||||
|
metadata_store: Optional[ModelMetadataStore] = None,
|
||||||
event_bus: Optional[EventServiceBase] = None,
|
event_bus: Optional[EventServiceBase] = None,
|
||||||
|
session: Optional[Session] = None,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
Initialize the installer object.
|
Initialize the installer object.
|
||||||
@ -67,10 +79,26 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
self._record_store = record_store
|
self._record_store = record_store
|
||||||
self._event_bus = event_bus
|
self._event_bus = event_bus
|
||||||
self._logger = InvokeAILogger.get_logger(name=self.__class__.__name__)
|
self._logger = InvokeAILogger.get_logger(name=self.__class__.__name__)
|
||||||
self._install_jobs = []
|
self._install_jobs: List[ModelInstallJob] = []
|
||||||
self._install_queue = Queue()
|
self._install_queue: Queue[ModelInstallJob] = Queue()
|
||||||
self._cached_model_paths = set()
|
self._cached_model_paths: Set[Path] = set()
|
||||||
self._models_installed = set()
|
self._models_installed: Set[str] = set()
|
||||||
|
self._lock = threading.Lock()
|
||||||
|
self._stop_event = threading.Event()
|
||||||
|
self._downloads_changed_event = threading.Event()
|
||||||
|
self._download_queue = download_queue
|
||||||
|
self._download_cache: Dict[AnyHttpUrl, ModelInstallJob] = {}
|
||||||
|
self._running = False
|
||||||
|
self._session = session
|
||||||
|
self._next_job_id = 0
|
||||||
|
# There may not necessarily be a metadata store initialized
|
||||||
|
# so we create one and initialize it with the same sql database
|
||||||
|
# used by the record store service.
|
||||||
|
if metadata_store:
|
||||||
|
self._metadata_store = metadata_store
|
||||||
|
else:
|
||||||
|
assert isinstance(record_store, ModelRecordServiceSQL)
|
||||||
|
self._metadata_store = ModelMetadataStore(record_store.db)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def app_config(self) -> InvokeAIAppConfig: # noqa D102
|
def app_config(self) -> InvokeAIAppConfig: # noqa D102
|
||||||
@ -84,69 +112,31 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
def event_bus(self) -> Optional[EventServiceBase]: # noqa D102
|
def event_bus(self) -> Optional[EventServiceBase]: # noqa D102
|
||||||
return self._event_bus
|
return self._event_bus
|
||||||
|
|
||||||
def start(self, *args: Any, **kwarg: Any) -> None:
|
# make the invoker optional here because we don't need it and it
|
||||||
|
# makes the installer harder to use outside the web app
|
||||||
|
def start(self, invoker: Optional[Invoker] = None) -> None:
|
||||||
"""Start the installer thread."""
|
"""Start the installer thread."""
|
||||||
self._start_installer_thread()
|
with self._lock:
|
||||||
self.sync_to_config()
|
if self._running:
|
||||||
|
raise Exception("Attempt to start the installer service twice")
|
||||||
|
self._start_installer_thread()
|
||||||
|
self._remove_dangling_install_dirs()
|
||||||
|
self.sync_to_config()
|
||||||
|
|
||||||
def stop(self, *args: Any, **kwarg: Any) -> None:
|
def stop(self, invoker: Optional[Invoker] = None) -> None:
|
||||||
"""Stop the installer thread; after this the object can be deleted and garbage collected."""
|
"""Stop the installer thread; after this the object can be deleted and garbage collected."""
|
||||||
self._install_queue.put(STOP_JOB)
|
with self._lock:
|
||||||
|
if not self._running:
|
||||||
def _start_installer_thread(self) -> None:
|
raise Exception("Attempt to stop the install service before it was started")
|
||||||
threading.Thread(target=self._install_next_item, daemon=True).start()
|
self._stop_event.set()
|
||||||
|
with self._install_queue.mutex:
|
||||||
def _install_next_item(self) -> None:
|
self._install_queue.queue.clear() # get rid of pending jobs
|
||||||
done = False
|
active_jobs = [x for x in self.list_jobs() if x.running]
|
||||||
while not done:
|
if active_jobs:
|
||||||
job = self._install_queue.get()
|
self._logger.warning("Waiting for active install job to complete")
|
||||||
if job == STOP_JOB:
|
self.wait_for_installs()
|
||||||
done = True
|
self._download_cache.clear()
|
||||||
continue
|
self._running = False
|
||||||
|
|
||||||
assert job.local_path is not None
|
|
||||||
try:
|
|
||||||
self._signal_job_running(job)
|
|
||||||
if job.inplace:
|
|
||||||
key = self.register_path(job.local_path, job.config_in)
|
|
||||||
else:
|
|
||||||
key = self.install_path(job.local_path, job.config_in)
|
|
||||||
job.config_out = self.record_store.get_model(key)
|
|
||||||
self._signal_job_completed(job)
|
|
||||||
|
|
||||||
except (OSError, DuplicateModelException, InvalidModelConfigException) as excp:
|
|
||||||
self._signal_job_errored(job, excp)
|
|
||||||
finally:
|
|
||||||
self._install_queue.task_done()
|
|
||||||
self._logger.info("Install thread exiting")
|
|
||||||
|
|
||||||
def _signal_job_running(self, job: ModelInstallJob) -> None:
|
|
||||||
job.status = InstallStatus.RUNNING
|
|
||||||
self._logger.info(f"{job.source}: model installation started")
|
|
||||||
if self._event_bus:
|
|
||||||
self._event_bus.emit_model_install_started(str(job.source))
|
|
||||||
|
|
||||||
def _signal_job_completed(self, job: ModelInstallJob) -> None:
|
|
||||||
job.status = InstallStatus.COMPLETED
|
|
||||||
assert job.config_out
|
|
||||||
self._logger.info(
|
|
||||||
f"{job.source}: model installation completed. {job.local_path} registered key {job.config_out.key}"
|
|
||||||
)
|
|
||||||
if self._event_bus:
|
|
||||||
assert job.local_path is not None
|
|
||||||
assert job.config_out is not None
|
|
||||||
key = job.config_out.key
|
|
||||||
self._event_bus.emit_model_install_completed(str(job.source), key)
|
|
||||||
|
|
||||||
def _signal_job_errored(self, job: ModelInstallJob, excp: Exception) -> None:
|
|
||||||
job.set_error(excp)
|
|
||||||
self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}")
|
|
||||||
if self._event_bus:
|
|
||||||
error_type = job.error_type
|
|
||||||
error = job.error
|
|
||||||
assert error_type is not None
|
|
||||||
assert error is not None
|
|
||||||
self._event_bus.emit_model_install_error(str(job.source), error_type, error)
|
|
||||||
|
|
||||||
def register_path(
|
def register_path(
|
||||||
self,
|
self,
|
||||||
@ -172,7 +162,12 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
info: AnyModelConfig = self._probe_model(Path(model_path), config)
|
info: AnyModelConfig = self._probe_model(Path(model_path), config)
|
||||||
old_hash = info.original_hash
|
old_hash = info.original_hash
|
||||||
dest_path = self.app_config.models_path / info.base.value / info.type.value / model_path.name
|
dest_path = self.app_config.models_path / info.base.value / info.type.value / model_path.name
|
||||||
new_path = self._copy_model(model_path, dest_path)
|
try:
|
||||||
|
new_path = self._copy_model(model_path, dest_path)
|
||||||
|
except FileExistsError as excp:
|
||||||
|
raise DuplicateModelException(
|
||||||
|
f"A model named {model_path.name} is already installed at {dest_path.as_posix()}"
|
||||||
|
) from excp
|
||||||
new_hash = FastModelHash.hash(new_path)
|
new_hash = FastModelHash.hash(new_path)
|
||||||
assert new_hash == old_hash, f"{model_path}: Model hash changed during installation, possibly corrupted."
|
assert new_hash == old_hash, f"{model_path}: Model hash changed during installation, possibly corrupted."
|
||||||
|
|
||||||
@ -182,43 +177,56 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
info,
|
info,
|
||||||
)
|
)
|
||||||
|
|
||||||
def import_model(
|
def import_model(self, source: ModelSource, config: Optional[Dict[str, Any]] = None) -> ModelInstallJob: # noqa D102
|
||||||
self,
|
if isinstance(source, LocalModelSource):
|
||||||
source: ModelSource,
|
install_job = self._import_local_model(source, config)
|
||||||
config: Optional[Dict[str, Any]] = None,
|
self._install_queue.put(install_job) # synchronously install
|
||||||
) -> ModelInstallJob: # noqa D102
|
elif isinstance(source, CivitaiModelSource):
|
||||||
if not config:
|
install_job = self._import_from_civitai(source, config)
|
||||||
config = {}
|
elif isinstance(source, HFModelSource):
|
||||||
|
install_job = self._import_from_hf(source, config)
|
||||||
|
elif isinstance(source, URLModelSource):
|
||||||
|
install_job = self._import_from_url(source, config)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported model source: '{type(source)}'")
|
||||||
|
|
||||||
# Installing a local path
|
self._install_jobs.append(install_job)
|
||||||
if isinstance(source, LocalModelSource) and Path(source.path).exists(): # a path that is already on disk
|
return install_job
|
||||||
job = ModelInstallJob(
|
|
||||||
source=source,
|
|
||||||
config_in=config,
|
|
||||||
local_path=Path(source.path),
|
|
||||||
)
|
|
||||||
self._install_jobs.append(job)
|
|
||||||
self._install_queue.put(job)
|
|
||||||
return job
|
|
||||||
|
|
||||||
else: # here is where we'd download a URL or repo_id. Implementation pending download queue.
|
|
||||||
raise UnknownModelException("File or directory not found")
|
|
||||||
|
|
||||||
def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
|
def list_jobs(self) -> List[ModelInstallJob]: # noqa D102
|
||||||
return self._install_jobs
|
return self._install_jobs
|
||||||
|
|
||||||
def get_job(self, source: ModelSource) -> List[ModelInstallJob]: # noqa D102
|
def get_job_by_source(self, source: ModelSource) -> List[ModelInstallJob]: # noqa D102
|
||||||
return [x for x in self._install_jobs if x.source == source]
|
return [x for x in self._install_jobs if x.source == source]
|
||||||
|
|
||||||
def wait_for_installs(self) -> List[ModelInstallJob]: # noqa D102
|
def get_job_by_id(self, id: int) -> ModelInstallJob: # noqa D102
|
||||||
|
jobs = [x for x in self._install_jobs if x.id == id]
|
||||||
|
if not jobs:
|
||||||
|
raise ValueError(f"No job with id {id} known")
|
||||||
|
assert len(jobs) == 1
|
||||||
|
assert isinstance(jobs[0], ModelInstallJob)
|
||||||
|
return jobs[0]
|
||||||
|
|
||||||
|
def wait_for_installs(self, timeout: int = 0) -> List[ModelInstallJob]: # noqa D102
|
||||||
|
"""Block until all installation jobs are done."""
|
||||||
|
start = time.time()
|
||||||
|
while len(self._download_cache) > 0:
|
||||||
|
if self._downloads_changed_event.wait(timeout=5): # in case we miss an event
|
||||||
|
self._downloads_changed_event.clear()
|
||||||
|
if timeout > 0 and time.time() - start > timeout:
|
||||||
|
raise Exception("Timeout exceeded")
|
||||||
self._install_queue.join()
|
self._install_queue.join()
|
||||||
return self._install_jobs
|
return self._install_jobs
|
||||||
|
|
||||||
|
def cancel_job(self, job: ModelInstallJob) -> None:
|
||||||
|
"""Cancel the indicated job."""
|
||||||
|
job.cancel()
|
||||||
|
with self._lock:
|
||||||
|
self._cancel_download_parts(job)
|
||||||
|
|
||||||
def prune_jobs(self) -> None:
|
def prune_jobs(self) -> None:
|
||||||
"""Prune all completed and errored jobs."""
|
"""Prune all completed and errored jobs."""
|
||||||
unfinished_jobs = [
|
unfinished_jobs = [x for x in self._install_jobs if not x.in_terminal_state]
|
||||||
x for x in self._install_jobs if x.status not in [InstallStatus.COMPLETED, InstallStatus.ERROR]
|
|
||||||
]
|
|
||||||
self._install_jobs = unfinished_jobs
|
self._install_jobs = unfinished_jobs
|
||||||
|
|
||||||
def sync_to_config(self) -> None:
|
def sync_to_config(self) -> None:
|
||||||
@ -234,10 +242,108 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
self._cached_model_paths = {Path(x.path) for x in self.record_store.all_models()}
|
self._cached_model_paths = {Path(x.path) for x in self.record_store.all_models()}
|
||||||
callback = self._scan_install if install else self._scan_register
|
callback = self._scan_install if install else self._scan_register
|
||||||
search = ModelSearch(on_model_found=callback)
|
search = ModelSearch(on_model_found=callback)
|
||||||
self._models_installed: Set[str] = set()
|
self._models_installed.clear()
|
||||||
search.search(scan_dir)
|
search.search(scan_dir)
|
||||||
return list(self._models_installed)
|
return list(self._models_installed)
|
||||||
|
|
||||||
|
def unregister(self, key: str) -> None: # noqa D102
|
||||||
|
self.record_store.del_model(key)
|
||||||
|
|
||||||
|
def delete(self, key: str) -> None: # noqa D102
|
||||||
|
"""Unregister the model. Delete its files only if they are within our models directory."""
|
||||||
|
model = self.record_store.get_model(key)
|
||||||
|
models_dir = self.app_config.models_path
|
||||||
|
model_path = models_dir / model.path
|
||||||
|
if model_path.is_relative_to(models_dir):
|
||||||
|
self.unconditionally_delete(key)
|
||||||
|
else:
|
||||||
|
self.unregister(key)
|
||||||
|
|
||||||
|
def unconditionally_delete(self, key: str) -> None: # noqa D102
|
||||||
|
model = self.record_store.get_model(key)
|
||||||
|
path = self.app_config.models_path / model.path
|
||||||
|
if path.is_dir():
|
||||||
|
rmtree(path)
|
||||||
|
else:
|
||||||
|
path.unlink()
|
||||||
|
self.unregister(key)
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------------------------------
|
||||||
|
# Internal functions that manage the installer threads
|
||||||
|
# --------------------------------------------------------------------------------------------
|
||||||
|
def _start_installer_thread(self) -> None:
|
||||||
|
threading.Thread(target=self._install_next_item, daemon=True).start()
|
||||||
|
self._running = True
|
||||||
|
|
||||||
|
def _install_next_item(self) -> None:
|
||||||
|
done = False
|
||||||
|
while not done:
|
||||||
|
if self._stop_event.is_set():
|
||||||
|
done = True
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
job = self._install_queue.get(timeout=1)
|
||||||
|
except Empty:
|
||||||
|
continue
|
||||||
|
|
||||||
|
assert job.local_path is not None
|
||||||
|
try:
|
||||||
|
if job.cancelled:
|
||||||
|
self._signal_job_cancelled(job)
|
||||||
|
|
||||||
|
elif job.errored:
|
||||||
|
self._signal_job_errored(job)
|
||||||
|
|
||||||
|
elif (
|
||||||
|
job.waiting or job.downloading
|
||||||
|
): # local jobs will be in waiting state, remote jobs will be downloading state
|
||||||
|
job.total_bytes = self._stat_size(job.local_path)
|
||||||
|
job.bytes = job.total_bytes
|
||||||
|
self._signal_job_running(job)
|
||||||
|
if job.inplace:
|
||||||
|
key = self.register_path(job.local_path, job.config_in)
|
||||||
|
else:
|
||||||
|
key = self.install_path(job.local_path, job.config_in)
|
||||||
|
job.config_out = self.record_store.get_model(key)
|
||||||
|
|
||||||
|
# enter the metadata, if there is any
|
||||||
|
if job.source_metadata:
|
||||||
|
self._metadata_store.add_metadata(key, job.source_metadata)
|
||||||
|
self._signal_job_completed(job)
|
||||||
|
|
||||||
|
except InvalidModelConfigException as excp:
|
||||||
|
if any(x.content_type is not None and "text/html" in x.content_type for x in job.download_parts):
|
||||||
|
job.set_error(
|
||||||
|
InvalidModelConfigException(
|
||||||
|
f"At least one file in {job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
|
||||||
|
)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
job.set_error(excp)
|
||||||
|
self._signal_job_errored(job)
|
||||||
|
|
||||||
|
except (OSError, DuplicateModelException) as excp:
|
||||||
|
job.set_error(excp)
|
||||||
|
self._signal_job_errored(job)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
# if this is an install of a remote file, then clean up the temporary directory
|
||||||
|
if job._install_tmpdir is not None:
|
||||||
|
rmtree(job._install_tmpdir)
|
||||||
|
self._install_queue.task_done()
|
||||||
|
|
||||||
|
self._logger.info("Install thread exiting")
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------------------------------
|
||||||
|
# Internal functions that manage the models directory
|
||||||
|
# --------------------------------------------------------------------------------------------
|
||||||
|
def _remove_dangling_install_dirs(self) -> None:
|
||||||
|
"""Remove leftover tmpdirs from aborted installs."""
|
||||||
|
path = self._app_config.models_path
|
||||||
|
for tmpdir in path.glob(f"{TMPDIR_PREFIX}*"):
|
||||||
|
self._logger.info(f"Removing dangling temporary directory {tmpdir}")
|
||||||
|
rmtree(tmpdir)
|
||||||
|
|
||||||
def _scan_models_directory(self) -> None:
|
def _scan_models_directory(self) -> None:
|
||||||
"""
|
"""
|
||||||
Scan the models directory for new and missing models.
|
Scan the models directory for new and missing models.
|
||||||
@ -320,28 +426,6 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
pass
|
pass
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def unregister(self, key: str) -> None: # noqa D102
|
|
||||||
self.record_store.del_model(key)
|
|
||||||
|
|
||||||
def delete(self, key: str) -> None: # noqa D102
|
|
||||||
"""Unregister the model. Delete its files only if they are within our models directory."""
|
|
||||||
model = self.record_store.get_model(key)
|
|
||||||
models_dir = self.app_config.models_path
|
|
||||||
model_path = models_dir / model.path
|
|
||||||
if model_path.is_relative_to(models_dir):
|
|
||||||
self.unconditionally_delete(key)
|
|
||||||
else:
|
|
||||||
self.unregister(key)
|
|
||||||
|
|
||||||
def unconditionally_delete(self, key: str) -> None: # noqa D102
|
|
||||||
model = self.record_store.get_model(key)
|
|
||||||
path = self.app_config.models_path / model.path
|
|
||||||
if path.is_dir():
|
|
||||||
rmtree(path)
|
|
||||||
else:
|
|
||||||
path.unlink()
|
|
||||||
self.unregister(key)
|
|
||||||
|
|
||||||
def _copy_model(self, old_path: Path, new_path: Path) -> Path:
|
def _copy_model(self, old_path: Path, new_path: Path) -> Path:
|
||||||
if old_path == new_path:
|
if old_path == new_path:
|
||||||
return old_path
|
return old_path
|
||||||
@ -397,3 +481,279 @@ class ModelInstallService(ModelInstallServiceBase):
|
|||||||
info.config = legacy_conf.relative_to(self.app_config.root_dir).as_posix()
|
info.config = legacy_conf.relative_to(self.app_config.root_dir).as_posix()
|
||||||
self.record_store.add_model(key, info)
|
self.record_store.add_model(key, info)
|
||||||
return key
|
return key
|
||||||
|
|
||||||
|
def _next_id(self) -> int:
|
||||||
|
with self._lock:
|
||||||
|
id = self._next_job_id
|
||||||
|
self._next_job_id += 1
|
||||||
|
return id
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _guess_variant() -> ModelRepoVariant:
|
||||||
|
"""Guess the best HuggingFace variant type to download."""
|
||||||
|
precision = choose_precision(choose_torch_device())
|
||||||
|
return ModelRepoVariant.FP16 if precision == "float16" else ModelRepoVariant.DEFAULT
|
||||||
|
|
||||||
|
def _import_local_model(self, source: LocalModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||||
|
return ModelInstallJob(
|
||||||
|
id=self._next_id(),
|
||||||
|
source=source,
|
||||||
|
config_in=config or {},
|
||||||
|
local_path=Path(source.path),
|
||||||
|
inplace=source.inplace,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _import_from_civitai(self, source: CivitaiModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||||
|
if not source.access_token:
|
||||||
|
self._logger.info("No Civitai access token provided; some models may not be downloadable.")
|
||||||
|
metadata = CivitaiMetadataFetch(self._session).from_id(str(source.version_id))
|
||||||
|
assert isinstance(metadata, ModelMetadataWithFiles)
|
||||||
|
remote_files = metadata.download_urls(session=self._session)
|
||||||
|
return self._import_remote_model(source=source, config=config, metadata=metadata, remote_files=remote_files)
|
||||||
|
|
||||||
|
def _import_from_hf(self, source: HFModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||||
|
# Add user's cached access token to HuggingFace requests
|
||||||
|
source.access_token = source.access_token or HfFolder.get_token()
|
||||||
|
if not source.access_token:
|
||||||
|
self._logger.info("No HuggingFace access token present; some models may not be downloadable.")
|
||||||
|
|
||||||
|
metadata = HuggingFaceMetadataFetch(self._session).from_id(source.repo_id)
|
||||||
|
assert isinstance(metadata, ModelMetadataWithFiles)
|
||||||
|
remote_files = metadata.download_urls(
|
||||||
|
variant=source.variant or self._guess_variant(),
|
||||||
|
subfolder=source.subfolder,
|
||||||
|
session=self._session,
|
||||||
|
)
|
||||||
|
|
||||||
|
return self._import_remote_model(
|
||||||
|
source=source,
|
||||||
|
config=config,
|
||||||
|
remote_files=remote_files,
|
||||||
|
metadata=metadata,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||||
|
# URLs from Civitai or HuggingFace will be handled specially
|
||||||
|
url_patterns = {
|
||||||
|
r"https?://civitai.com/": CivitaiMetadataFetch,
|
||||||
|
r"https?://huggingface.co/": HuggingFaceMetadataFetch,
|
||||||
|
}
|
||||||
|
metadata = None
|
||||||
|
for pattern, fetcher in url_patterns.items():
|
||||||
|
if re.match(pattern, str(source.url), re.IGNORECASE):
|
||||||
|
metadata = fetcher(self._session).from_url(source.url)
|
||||||
|
break
|
||||||
|
if metadata and isinstance(metadata, ModelMetadataWithFiles):
|
||||||
|
remote_files = metadata.download_urls(session=self._session)
|
||||||
|
else:
|
||||||
|
remote_files = [RemoteModelFile(url=source.url, path=Path("."), size=0)]
|
||||||
|
|
||||||
|
return self._import_remote_model(
|
||||||
|
source=source,
|
||||||
|
config=config,
|
||||||
|
metadata=metadata,
|
||||||
|
remote_files=remote_files,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _import_remote_model(
|
||||||
|
self,
|
||||||
|
source: ModelSource,
|
||||||
|
remote_files: List[RemoteModelFile],
|
||||||
|
metadata: Optional[AnyModelRepoMetadata],
|
||||||
|
config: Optional[Dict[str, Any]],
|
||||||
|
) -> ModelInstallJob:
|
||||||
|
# TODO: Replace with tempfile.tmpdir() when multithreading is cleaned up.
|
||||||
|
# Currently the tmpdir isn't automatically removed at exit because it is
|
||||||
|
# being held in a daemon thread.
|
||||||
|
tmpdir = Path(
|
||||||
|
mkdtemp(
|
||||||
|
dir=self._app_config.models_path,
|
||||||
|
prefix=TMPDIR_PREFIX,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
install_job = ModelInstallJob(
|
||||||
|
id=self._next_id(),
|
||||||
|
source=source,
|
||||||
|
config_in=config or {},
|
||||||
|
source_metadata=metadata,
|
||||||
|
local_path=tmpdir, # local path may change once the download has started due to content-disposition handling
|
||||||
|
bytes=0,
|
||||||
|
total_bytes=0,
|
||||||
|
)
|
||||||
|
# we remember the path up to the top of the tmpdir so that it may be
|
||||||
|
# removed safely at the end of the install process.
|
||||||
|
install_job._install_tmpdir = tmpdir
|
||||||
|
assert install_job.total_bytes is not None # to avoid type checking complaints in the loop below
|
||||||
|
|
||||||
|
self._logger.info(f"Queuing {source} for downloading")
|
||||||
|
for model_file in remote_files:
|
||||||
|
url = model_file.url
|
||||||
|
path = model_file.path
|
||||||
|
self._logger.info(f"Downloading {url} => {path}")
|
||||||
|
install_job.total_bytes += model_file.size
|
||||||
|
assert hasattr(source, "access_token")
|
||||||
|
dest = tmpdir / path.parent
|
||||||
|
dest.mkdir(parents=True, exist_ok=True)
|
||||||
|
download_job = DownloadJob(
|
||||||
|
source=url,
|
||||||
|
dest=dest,
|
||||||
|
access_token=source.access_token,
|
||||||
|
)
|
||||||
|
self._download_cache[download_job.source] = install_job # matches a download job to an install job
|
||||||
|
install_job.download_parts.add(download_job)
|
||||||
|
|
||||||
|
self._download_queue.submit_download_job(
|
||||||
|
download_job,
|
||||||
|
on_start=self._download_started_callback,
|
||||||
|
on_progress=self._download_progress_callback,
|
||||||
|
on_complete=self._download_complete_callback,
|
||||||
|
on_error=self._download_error_callback,
|
||||||
|
on_cancelled=self._download_cancelled_callback,
|
||||||
|
)
|
||||||
|
return install_job
|
||||||
|
|
||||||
|
def _stat_size(self, path: Path) -> int:
|
||||||
|
size = 0
|
||||||
|
if path.is_file():
|
||||||
|
size = path.stat().st_size
|
||||||
|
elif path.is_dir():
|
||||||
|
for root, _, files in os.walk(path):
|
||||||
|
size += sum(self._stat_size(Path(root, x)) for x in files)
|
||||||
|
return size
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Callbacks are executed by the download queue in a separate thread
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
def _download_started_callback(self, download_job: DownloadJob) -> None:
|
||||||
|
self._logger.info(f"{download_job.source}: model download started")
|
||||||
|
with self._lock:
|
||||||
|
install_job = self._download_cache[download_job.source]
|
||||||
|
install_job.status = InstallStatus.DOWNLOADING
|
||||||
|
|
||||||
|
assert download_job.download_path
|
||||||
|
if install_job.local_path == install_job._install_tmpdir:
|
||||||
|
partial_path = download_job.download_path.relative_to(install_job._install_tmpdir)
|
||||||
|
dest_name = partial_path.parts[0]
|
||||||
|
install_job.local_path = install_job._install_tmpdir / dest_name
|
||||||
|
|
||||||
|
# Update the total bytes count for remote sources.
|
||||||
|
if not install_job.total_bytes:
|
||||||
|
install_job.total_bytes = sum(x.total_bytes for x in install_job.download_parts)
|
||||||
|
|
||||||
|
def _download_progress_callback(self, download_job: DownloadJob) -> None:
|
||||||
|
with self._lock:
|
||||||
|
install_job = self._download_cache[download_job.source]
|
||||||
|
if install_job.cancelled: # This catches the case in which the caller directly calls job.cancel()
|
||||||
|
self._cancel_download_parts(install_job)
|
||||||
|
else:
|
||||||
|
# update sizes
|
||||||
|
install_job.bytes = sum(x.bytes for x in install_job.download_parts)
|
||||||
|
self._signal_job_downloading(install_job)
|
||||||
|
|
||||||
|
def _download_complete_callback(self, download_job: DownloadJob) -> None:
|
||||||
|
with self._lock:
|
||||||
|
install_job = self._download_cache[download_job.source]
|
||||||
|
self._download_cache.pop(download_job.source, None)
|
||||||
|
|
||||||
|
# are there any more active jobs left in this task?
|
||||||
|
if all(x.complete for x in install_job.download_parts):
|
||||||
|
# now enqueue job for actual installation into the models directory
|
||||||
|
self._install_queue.put(install_job)
|
||||||
|
|
||||||
|
# Let other threads know that the number of downloads has changed
|
||||||
|
self._downloads_changed_event.set()
|
||||||
|
|
||||||
|
def _download_error_callback(self, download_job: DownloadJob, excp: Optional[Exception] = None) -> None:
|
||||||
|
with self._lock:
|
||||||
|
install_job = self._download_cache.pop(download_job.source, None)
|
||||||
|
assert install_job is not None
|
||||||
|
assert excp is not None
|
||||||
|
install_job.set_error(excp)
|
||||||
|
self._logger.error(
|
||||||
|
f"Cancelling {install_job.source} due to an error while downloading {download_job.source}: {str(excp)}"
|
||||||
|
)
|
||||||
|
self._cancel_download_parts(install_job)
|
||||||
|
|
||||||
|
# Let other threads know that the number of downloads has changed
|
||||||
|
self._downloads_changed_event.set()
|
||||||
|
|
||||||
|
def _download_cancelled_callback(self, download_job: DownloadJob) -> None:
|
||||||
|
with self._lock:
|
||||||
|
install_job = self._download_cache.pop(download_job.source, None)
|
||||||
|
if not install_job:
|
||||||
|
return
|
||||||
|
self._downloads_changed_event.set()
|
||||||
|
self._logger.warning(f"Download {download_job.source} cancelled.")
|
||||||
|
# if install job has already registered an error, then do not replace its status with cancelled
|
||||||
|
if not install_job.errored:
|
||||||
|
install_job.cancel()
|
||||||
|
self._cancel_download_parts(install_job)
|
||||||
|
|
||||||
|
# Let other threads know that the number of downloads has changed
|
||||||
|
self._downloads_changed_event.set()
|
||||||
|
|
||||||
|
def _cancel_download_parts(self, install_job: ModelInstallJob) -> None:
|
||||||
|
# on multipart downloads, _cancel_components() will get called repeatedly from the download callbacks
|
||||||
|
# do not lock here because it gets called within a locked context
|
||||||
|
for s in install_job.download_parts:
|
||||||
|
self._download_queue.cancel_job(s)
|
||||||
|
|
||||||
|
if all(x.in_terminal_state for x in install_job.download_parts):
|
||||||
|
# When all parts have reached their terminal state, we finalize the job to clean up the temporary directory and other resources
|
||||||
|
self._install_queue.put(install_job)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------------------------
|
||||||
|
# Internal methods that put events on the event bus
|
||||||
|
# ------------------------------------------------------------------------------------------------
|
||||||
|
def _signal_job_running(self, job: ModelInstallJob) -> None:
|
||||||
|
job.status = InstallStatus.RUNNING
|
||||||
|
self._logger.info(f"{job.source}: model installation started")
|
||||||
|
if self._event_bus:
|
||||||
|
self._event_bus.emit_model_install_running(str(job.source))
|
||||||
|
|
||||||
|
def _signal_job_downloading(self, job: ModelInstallJob) -> None:
|
||||||
|
if self._event_bus:
|
||||||
|
parts: List[Dict[str, str | int]] = [
|
||||||
|
{
|
||||||
|
"url": str(x.source),
|
||||||
|
"local_path": str(x.download_path),
|
||||||
|
"bytes": x.bytes,
|
||||||
|
"total_bytes": x.total_bytes,
|
||||||
|
}
|
||||||
|
for x in job.download_parts
|
||||||
|
]
|
||||||
|
assert job.bytes is not None
|
||||||
|
assert job.total_bytes is not None
|
||||||
|
self._event_bus.emit_model_install_downloading(
|
||||||
|
str(job.source),
|
||||||
|
local_path=job.local_path.as_posix(),
|
||||||
|
parts=parts,
|
||||||
|
bytes=job.bytes,
|
||||||
|
total_bytes=job.total_bytes,
|
||||||
|
)
|
||||||
|
|
||||||
|
def _signal_job_completed(self, job: ModelInstallJob) -> None:
|
||||||
|
job.status = InstallStatus.COMPLETED
|
||||||
|
assert job.config_out
|
||||||
|
self._logger.info(
|
||||||
|
f"{job.source}: model installation completed. {job.local_path} registered key {job.config_out.key}"
|
||||||
|
)
|
||||||
|
if self._event_bus:
|
||||||
|
assert job.local_path is not None
|
||||||
|
assert job.config_out is not None
|
||||||
|
key = job.config_out.key
|
||||||
|
self._event_bus.emit_model_install_completed(str(job.source), key)
|
||||||
|
|
||||||
|
def _signal_job_errored(self, job: ModelInstallJob) -> None:
|
||||||
|
self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}\n{job.error}")
|
||||||
|
if self._event_bus:
|
||||||
|
error_type = job.error_type
|
||||||
|
error = job.error
|
||||||
|
assert error_type is not None
|
||||||
|
assert error is not None
|
||||||
|
self._event_bus.emit_model_install_error(str(job.source), error_type, error)
|
||||||
|
|
||||||
|
def _signal_job_cancelled(self, job: ModelInstallJob) -> None:
|
||||||
|
self._logger.info(f"{job.source}: model installation was cancelled")
|
||||||
|
if self._event_bus:
|
||||||
|
self._event_bus.emit_model_install_cancelled(str(job.source))
|
||||||
|
@ -4,6 +4,8 @@ from .model_records_base import ( # noqa F401
|
|||||||
InvalidModelException,
|
InvalidModelException,
|
||||||
ModelRecordServiceBase,
|
ModelRecordServiceBase,
|
||||||
UnknownModelException,
|
UnknownModelException,
|
||||||
|
ModelSummary,
|
||||||
|
ModelRecordOrderBy,
|
||||||
)
|
)
|
||||||
from .model_records_sql import ModelRecordServiceSQL # noqa F401
|
from .model_records_sql import ModelRecordServiceSQL # noqa F401
|
||||||
|
|
||||||
@ -13,4 +15,6 @@ __all__ = [
|
|||||||
"DuplicateModelException",
|
"DuplicateModelException",
|
||||||
"InvalidModelException",
|
"InvalidModelException",
|
||||||
"UnknownModelException",
|
"UnknownModelException",
|
||||||
|
"ModelSummary",
|
||||||
|
"ModelRecordOrderBy",
|
||||||
]
|
]
|
||||||
|
@ -4,10 +4,15 @@ Abstract base class for storing and retrieving model configuration records.
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
|
from enum import Enum
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List, Optional, Union
|
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from invokeai.app.services.shared.pagination import PaginatedResults
|
||||||
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType
|
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType
|
||||||
|
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore
|
||||||
|
|
||||||
|
|
||||||
class DuplicateModelException(Exception):
|
class DuplicateModelException(Exception):
|
||||||
@ -26,11 +31,33 @@ class ConfigFileVersionMismatchException(Exception):
|
|||||||
"""Raised on an attempt to open a config with an incompatible version."""
|
"""Raised on an attempt to open a config with an incompatible version."""
|
||||||
|
|
||||||
|
|
||||||
|
class ModelRecordOrderBy(str, Enum):
|
||||||
|
"""The order in which to return model summaries."""
|
||||||
|
|
||||||
|
Default = "default" # order by type, base, format and name
|
||||||
|
Type = "type"
|
||||||
|
Base = "base"
|
||||||
|
Name = "name"
|
||||||
|
Format = "format"
|
||||||
|
|
||||||
|
|
||||||
|
class ModelSummary(BaseModel):
|
||||||
|
"""A short summary of models for UI listing purposes."""
|
||||||
|
|
||||||
|
key: str = Field(description="model key")
|
||||||
|
type: ModelType = Field(description="model type")
|
||||||
|
base: BaseModelType = Field(description="base model")
|
||||||
|
format: ModelFormat = Field(description="model format")
|
||||||
|
name: str = Field(description="model name")
|
||||||
|
description: str = Field(description="short description of model")
|
||||||
|
tags: Set[str] = Field(description="tags associated with model")
|
||||||
|
|
||||||
|
|
||||||
class ModelRecordServiceBase(ABC):
|
class ModelRecordServiceBase(ABC):
|
||||||
"""Abstract base class for storage and retrieval of model configs."""
|
"""Abstract base class for storage and retrieval of model configs."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
|
def add_model(self, key: str, config: Union[Dict[str, Any], AnyModelConfig]) -> AnyModelConfig:
|
||||||
"""
|
"""
|
||||||
Add a model to the database.
|
Add a model to the database.
|
||||||
|
|
||||||
@ -54,7 +81,7 @@ class ModelRecordServiceBase(ABC):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def update_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
|
def update_model(self, key: str, config: Union[Dict[str, Any], AnyModelConfig]) -> AnyModelConfig:
|
||||||
"""
|
"""
|
||||||
Update the model, returning the updated version.
|
Update the model, returning the updated version.
|
||||||
|
|
||||||
@ -75,6 +102,47 @@ class ModelRecordServiceBase(ABC):
|
|||||||
"""
|
"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
@property
|
||||||
|
@abstractmethod
|
||||||
|
def metadata_store(self) -> ModelMetadataStore:
|
||||||
|
"""Return a ModelMetadataStore initialized on the same database."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_metadata(self, key: str) -> Optional[AnyModelRepoMetadata]:
|
||||||
|
"""
|
||||||
|
Retrieve metadata (if any) from when model was downloaded from a repo.
|
||||||
|
|
||||||
|
:param key: Model key
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]:
|
||||||
|
"""List metadata for all models that have it."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def search_by_metadata_tag(self, tags: Set[str]) -> List[AnyModelConfig]:
|
||||||
|
"""
|
||||||
|
Search model metadata for ones with all listed tags and return their corresponding configs.
|
||||||
|
|
||||||
|
:param tags: Set of tags to search for. All tags must be present.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def list_tags(self) -> Set[str]:
|
||||||
|
"""Return a unique set of all the model tags in the metadata database."""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def list_models(
|
||||||
|
self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
|
||||||
|
) -> PaginatedResults[ModelSummary]:
|
||||||
|
"""Return a paginated summary listing of each model in the database."""
|
||||||
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def exists(self, key: str) -> bool:
|
def exists(self, key: str) -> bool:
|
||||||
"""
|
"""
|
||||||
|
@ -42,9 +42,11 @@ Typical usage:
|
|||||||
|
|
||||||
import json
|
import json
|
||||||
import sqlite3
|
import sqlite3
|
||||||
|
from math import ceil
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List, Optional, Union
|
from typing import Any, Dict, List, Optional, Set, Tuple, Union
|
||||||
|
|
||||||
|
from invokeai.app.services.shared.pagination import PaginatedResults
|
||||||
from invokeai.backend.model_manager.config import (
|
from invokeai.backend.model_manager.config import (
|
||||||
AnyModelConfig,
|
AnyModelConfig,
|
||||||
BaseModelType,
|
BaseModelType,
|
||||||
@ -52,11 +54,14 @@ from invokeai.backend.model_manager.config import (
|
|||||||
ModelFormat,
|
ModelFormat,
|
||||||
ModelType,
|
ModelType,
|
||||||
)
|
)
|
||||||
|
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata, ModelMetadataStore, UnknownMetadataException
|
||||||
|
|
||||||
from ..shared.sqlite.sqlite_database import SqliteDatabase
|
from ..shared.sqlite.sqlite_database import SqliteDatabase
|
||||||
from .model_records_base import (
|
from .model_records_base import (
|
||||||
DuplicateModelException,
|
DuplicateModelException,
|
||||||
|
ModelRecordOrderBy,
|
||||||
ModelRecordServiceBase,
|
ModelRecordServiceBase,
|
||||||
|
ModelSummary,
|
||||||
UnknownModelException,
|
UnknownModelException,
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -64,9 +69,6 @@ from .model_records_base import (
|
|||||||
class ModelRecordServiceSQL(ModelRecordServiceBase):
|
class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||||
"""Implementation of the ModelConfigStore ABC using a SQL database."""
|
"""Implementation of the ModelConfigStore ABC using a SQL database."""
|
||||||
|
|
||||||
_db: SqliteDatabase
|
|
||||||
_cursor: sqlite3.Cursor
|
|
||||||
|
|
||||||
def __init__(self, db: SqliteDatabase):
|
def __init__(self, db: SqliteDatabase):
|
||||||
"""
|
"""
|
||||||
Initialize a new object from preexisting sqlite3 connection and threading lock objects.
|
Initialize a new object from preexisting sqlite3 connection and threading lock objects.
|
||||||
@ -78,7 +80,12 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
|||||||
self._db = db
|
self._db = db
|
||||||
self._cursor = self._db.conn.cursor()
|
self._cursor = self._db.conn.cursor()
|
||||||
|
|
||||||
def add_model(self, key: str, config: Union[dict, AnyModelConfig]) -> AnyModelConfig:
|
@property
|
||||||
|
def db(self) -> SqliteDatabase:
|
||||||
|
"""Return the underlying database."""
|
||||||
|
return self._db
|
||||||
|
|
||||||
|
def add_model(self, key: str, config: Union[Dict[str, Any], AnyModelConfig]) -> AnyModelConfig:
|
||||||
"""
|
"""
|
||||||
Add a model to the database.
|
Add a model to the database.
|
||||||
|
|
||||||
@ -293,3 +300,95 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
|||||||
)
|
)
|
||||||
results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
|
results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self._cursor.fetchall()]
|
||||||
return results
|
return results
|
||||||
|
|
||||||
|
@property
|
||||||
|
def metadata_store(self) -> ModelMetadataStore:
|
||||||
|
"""Return a ModelMetadataStore initialized on the same database."""
|
||||||
|
return ModelMetadataStore(self._db)
|
||||||
|
|
||||||
|
def get_metadata(self, key: str) -> Optional[AnyModelRepoMetadata]:
|
||||||
|
"""
|
||||||
|
Retrieve metadata (if any) from when model was downloaded from a repo.
|
||||||
|
|
||||||
|
:param key: Model key
|
||||||
|
"""
|
||||||
|
store = self.metadata_store
|
||||||
|
try:
|
||||||
|
metadata = store.get_metadata(key)
|
||||||
|
return metadata
|
||||||
|
except UnknownMetadataException:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def search_by_metadata_tag(self, tags: Set[str]) -> List[AnyModelConfig]:
|
||||||
|
"""
|
||||||
|
Search model metadata for ones with all listed tags and return their corresponding configs.
|
||||||
|
|
||||||
|
:param tags: Set of tags to search for. All tags must be present.
|
||||||
|
"""
|
||||||
|
store = ModelMetadataStore(self._db)
|
||||||
|
keys = store.search_by_tag(tags)
|
||||||
|
return [self.get_model(x) for x in keys]
|
||||||
|
|
||||||
|
def list_tags(self) -> Set[str]:
|
||||||
|
"""Return a unique set of all the model tags in the metadata database."""
|
||||||
|
store = ModelMetadataStore(self._db)
|
||||||
|
return store.list_tags()
|
||||||
|
|
||||||
|
def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]:
|
||||||
|
"""List metadata for all models that have it."""
|
||||||
|
store = ModelMetadataStore(self._db)
|
||||||
|
return store.list_all_metadata()
|
||||||
|
|
||||||
|
def list_models(
|
||||||
|
self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
|
||||||
|
) -> PaginatedResults[ModelSummary]:
|
||||||
|
"""Return a paginated summary listing of each model in the database."""
|
||||||
|
ordering = {
|
||||||
|
ModelRecordOrderBy.Default: "a.type, a.base, a.format, a.name",
|
||||||
|
ModelRecordOrderBy.Type: "a.type",
|
||||||
|
ModelRecordOrderBy.Base: "a.base",
|
||||||
|
ModelRecordOrderBy.Name: "a.name",
|
||||||
|
ModelRecordOrderBy.Format: "a.format",
|
||||||
|
}
|
||||||
|
|
||||||
|
def _fixup(summary: Dict[str, str]) -> Dict[str, Union[str, int, Set[str]]]:
|
||||||
|
"""Fix up results so that there are no null values."""
|
||||||
|
result: Dict[str, Union[str, int, Set[str]]] = {}
|
||||||
|
for key, item in summary.items():
|
||||||
|
result[key] = item or ""
|
||||||
|
result["tags"] = set(json.loads(summary["tags"] or "[]"))
|
||||||
|
return result
|
||||||
|
|
||||||
|
# Lock so that the database isn't updated while we're doing the two queries.
|
||||||
|
with self._db.lock:
|
||||||
|
# query1: get the total number of model configs
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
select count(*) from model_config;
|
||||||
|
""",
|
||||||
|
(),
|
||||||
|
)
|
||||||
|
total = int(self._cursor.fetchone()[0])
|
||||||
|
|
||||||
|
# query2: fetch key fields from the join of model_config and model_metadata
|
||||||
|
self._cursor.execute(
|
||||||
|
f"""--sql
|
||||||
|
SELECT a.id as key, a.type, a.base, a.format, a.name,
|
||||||
|
json_extract(a.config, '$.description') as description,
|
||||||
|
json_extract(b.metadata, '$.tags') as tags
|
||||||
|
FROM model_config AS a
|
||||||
|
LEFT JOIN model_metadata AS b on a.id=b.id
|
||||||
|
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason
|
||||||
|
LIMIT ?
|
||||||
|
OFFSET ?;
|
||||||
|
""",
|
||||||
|
(
|
||||||
|
per_page,
|
||||||
|
page * per_page,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
rows = self._cursor.fetchall()
|
||||||
|
items = [ModelSummary.model_validate(_fixup(dict(x))) for x in rows]
|
||||||
|
return PaginatedResults(
|
||||||
|
page=page, pages=ceil(total / per_page), per_page=per_page, total=total, items=items
|
||||||
|
)
|
||||||
|
@ -6,6 +6,7 @@ from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
|||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import build_migration_1
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_1 import build_migration_1
|
||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_2 import build_migration_2
|
||||||
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_3 import build_migration_3
|
||||||
|
from invokeai.app.services.shared.sqlite_migrator.migrations.migration_4 import build_migration_4
|
||||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_impl import SqliteMigrator
|
||||||
|
|
||||||
|
|
||||||
@ -28,7 +29,8 @@ def init_db(config: InvokeAIAppConfig, logger: Logger, image_files: ImageFileSto
|
|||||||
migrator = SqliteMigrator(db=db)
|
migrator = SqliteMigrator(db=db)
|
||||||
migrator.register_migration(build_migration_1())
|
migrator.register_migration(build_migration_1())
|
||||||
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
|
migrator.register_migration(build_migration_2(image_files=image_files, logger=logger))
|
||||||
migrator.register_migration(build_migration_3())
|
migrator.register_migration(build_migration_3(app_config=config, logger=logger))
|
||||||
|
migrator.register_migration(build_migration_4())
|
||||||
migrator.run_migrations()
|
migrator.run_migrations()
|
||||||
|
|
||||||
return db
|
return db
|
||||||
|
@ -11,8 +11,6 @@ from invokeai.app.services.workflow_records.workflow_records_common import (
|
|||||||
UnsafeWorkflowWithVersionValidator,
|
UnsafeWorkflowWithVersionValidator,
|
||||||
)
|
)
|
||||||
|
|
||||||
from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1
|
|
||||||
|
|
||||||
|
|
||||||
class Migration2Callback:
|
class Migration2Callback:
|
||||||
def __init__(self, image_files: ImageFileStorageBase, logger: Logger):
|
def __init__(self, image_files: ImageFileStorageBase, logger: Logger):
|
||||||
@ -25,8 +23,6 @@ class Migration2Callback:
|
|||||||
self._drop_old_workflow_tables(cursor)
|
self._drop_old_workflow_tables(cursor)
|
||||||
self._add_workflow_library(cursor)
|
self._add_workflow_library(cursor)
|
||||||
self._drop_model_manager_metadata(cursor)
|
self._drop_model_manager_metadata(cursor)
|
||||||
self._recreate_model_config(cursor)
|
|
||||||
self._migrate_model_config_records(cursor)
|
|
||||||
self._migrate_embedded_workflows(cursor)
|
self._migrate_embedded_workflows(cursor)
|
||||||
|
|
||||||
def _add_images_has_workflow(self, cursor: sqlite3.Cursor) -> None:
|
def _add_images_has_workflow(self, cursor: sqlite3.Cursor) -> None:
|
||||||
@ -100,45 +96,6 @@ class Migration2Callback:
|
|||||||
"""Drops the `model_manager_metadata` table."""
|
"""Drops the `model_manager_metadata` table."""
|
||||||
cursor.execute("DROP TABLE IF EXISTS model_manager_metadata;")
|
cursor.execute("DROP TABLE IF EXISTS model_manager_metadata;")
|
||||||
|
|
||||||
def _recreate_model_config(self, cursor: sqlite3.Cursor) -> None:
|
|
||||||
"""
|
|
||||||
Drops the `model_config` table, recreating it.
|
|
||||||
|
|
||||||
In 3.4.0, this table used explicit columns but was changed to use json_extract 3.5.0.
|
|
||||||
|
|
||||||
Because this table is not used in production, we are able to simply drop it and recreate it.
|
|
||||||
"""
|
|
||||||
|
|
||||||
cursor.execute("DROP TABLE IF EXISTS model_config;")
|
|
||||||
|
|
||||||
cursor.execute(
|
|
||||||
"""--sql
|
|
||||||
CREATE TABLE IF NOT EXISTS model_config (
|
|
||||||
id TEXT NOT NULL PRIMARY KEY,
|
|
||||||
-- The next 3 fields are enums in python, unrestricted string here
|
|
||||||
base TEXT GENERATED ALWAYS as (json_extract(config, '$.base')) VIRTUAL NOT NULL,
|
|
||||||
type TEXT GENERATED ALWAYS as (json_extract(config, '$.type')) VIRTUAL NOT NULL,
|
|
||||||
name TEXT GENERATED ALWAYS as (json_extract(config, '$.name')) VIRTUAL NOT NULL,
|
|
||||||
path TEXT GENERATED ALWAYS as (json_extract(config, '$.path')) VIRTUAL NOT NULL,
|
|
||||||
format TEXT GENERATED ALWAYS as (json_extract(config, '$.format')) VIRTUAL NOT NULL,
|
|
||||||
original_hash TEXT, -- could be null
|
|
||||||
-- Serialized JSON representation of the whole config object,
|
|
||||||
-- which will contain additional fields from subclasses
|
|
||||||
config TEXT NOT NULL,
|
|
||||||
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
|
||||||
-- Updated via trigger
|
|
||||||
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
|
||||||
-- unique constraint on combo of name, base and type
|
|
||||||
UNIQUE(name, base, type)
|
|
||||||
);
|
|
||||||
"""
|
|
||||||
)
|
|
||||||
|
|
||||||
def _migrate_model_config_records(self, cursor: sqlite3.Cursor) -> None:
|
|
||||||
"""After updating the model config table, we repopulate it."""
|
|
||||||
model_record_migrator = MigrateModelYamlToDb1(cursor)
|
|
||||||
model_record_migrator.migrate()
|
|
||||||
|
|
||||||
def _migrate_embedded_workflows(self, cursor: sqlite3.Cursor) -> None:
|
def _migrate_embedded_workflows(self, cursor: sqlite3.Cursor) -> None:
|
||||||
"""
|
"""
|
||||||
In the v3.5.0 release, InvokeAI changed how it handles embedded workflows. The `images` table in
|
In the v3.5.0 release, InvokeAI changed how it handles embedded workflows. The `images` table in
|
||||||
|
@ -1,13 +1,16 @@
|
|||||||
import sqlite3
|
import sqlite3
|
||||||
|
from logging import Logger
|
||||||
|
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||||
|
|
||||||
from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1
|
from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1
|
||||||
|
|
||||||
|
|
||||||
class Migration3Callback:
|
class Migration3Callback:
|
||||||
def __init__(self) -> None:
|
def __init__(self, app_config: InvokeAIAppConfig, logger: Logger) -> None:
|
||||||
pass
|
self._app_config = app_config
|
||||||
|
self._logger = logger
|
||||||
|
|
||||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||||
self._drop_model_manager_metadata(cursor)
|
self._drop_model_manager_metadata(cursor)
|
||||||
@ -54,11 +57,12 @@ class Migration3Callback:
|
|||||||
|
|
||||||
def _migrate_model_config_records(self, cursor: sqlite3.Cursor) -> None:
|
def _migrate_model_config_records(self, cursor: sqlite3.Cursor) -> None:
|
||||||
"""After updating the model config table, we repopulate it."""
|
"""After updating the model config table, we repopulate it."""
|
||||||
model_record_migrator = MigrateModelYamlToDb1(cursor)
|
self._logger.info("Migrating model config records from models.yaml to database")
|
||||||
|
model_record_migrator = MigrateModelYamlToDb1(self._app_config, self._logger, cursor)
|
||||||
model_record_migrator.migrate()
|
model_record_migrator.migrate()
|
||||||
|
|
||||||
|
|
||||||
def build_migration_3() -> Migration:
|
def build_migration_3(app_config: InvokeAIAppConfig, logger: Logger) -> Migration:
|
||||||
"""
|
"""
|
||||||
Build the migration from database version 2 to 3.
|
Build the migration from database version 2 to 3.
|
||||||
|
|
||||||
@ -69,7 +73,7 @@ def build_migration_3() -> Migration:
|
|||||||
migration_3 = Migration(
|
migration_3 = Migration(
|
||||||
from_version=2,
|
from_version=2,
|
||||||
to_version=3,
|
to_version=3,
|
||||||
callback=Migration3Callback(),
|
callback=Migration3Callback(app_config=app_config, logger=logger),
|
||||||
)
|
)
|
||||||
|
|
||||||
return migration_3
|
return migration_3
|
||||||
|
@ -0,0 +1,83 @@
|
|||||||
|
import sqlite3
|
||||||
|
|
||||||
|
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||||
|
|
||||||
|
|
||||||
|
class Migration4Callback:
|
||||||
|
"""Callback to do step 4 of migration."""
|
||||||
|
|
||||||
|
def __call__(self, cursor: sqlite3.Cursor) -> None: # noqa D102
|
||||||
|
self._create_model_metadata(cursor)
|
||||||
|
self._create_model_tags(cursor)
|
||||||
|
self._create_tags(cursor)
|
||||||
|
self._create_triggers(cursor)
|
||||||
|
|
||||||
|
def _create_model_metadata(self, cursor: sqlite3.Cursor) -> None:
|
||||||
|
"""Create the table used to store model metadata downloaded from remote sources."""
|
||||||
|
cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE TABLE IF NOT EXISTS model_metadata (
|
||||||
|
id TEXT NOT NULL PRIMARY KEY,
|
||||||
|
name TEXT GENERATED ALWAYS AS (json_extract(metadata, '$.name')) VIRTUAL NOT NULL,
|
||||||
|
author TEXT GENERATED ALWAYS AS (json_extract(metadata, '$.author')) VIRTUAL NOT NULL,
|
||||||
|
-- Serialized JSON representation of the whole metadata object,
|
||||||
|
-- which will contain additional fields from subclasses
|
||||||
|
metadata TEXT NOT NULL,
|
||||||
|
created_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||||
|
-- Updated via trigger
|
||||||
|
updated_at DATETIME NOT NULL DEFAULT(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')),
|
||||||
|
FOREIGN KEY(id) REFERENCES model_config(id) ON DELETE CASCADE
|
||||||
|
);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
def _create_model_tags(self, cursor: sqlite3.Cursor) -> None:
|
||||||
|
cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE TABLE IF NOT EXISTS model_tags (
|
||||||
|
model_id TEXT NOT NULL,
|
||||||
|
tag_id INTEGER NOT NULL,
|
||||||
|
FOREIGN KEY(model_id) REFERENCES model_config(id) ON DELETE CASCADE,
|
||||||
|
FOREIGN KEY(tag_id) REFERENCES tags(tag_id) ON DELETE CASCADE,
|
||||||
|
UNIQUE(model_id,tag_id)
|
||||||
|
);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
def _create_tags(self, cursor: sqlite3.Cursor) -> None:
|
||||||
|
cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE TABLE IF NOT EXISTS tags (
|
||||||
|
tag_id INTEGER NOT NULL PRIMARY KEY,
|
||||||
|
tag_text TEXT NOT NULL UNIQUE
|
||||||
|
);
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
def _create_triggers(self, cursor: sqlite3.Cursor) -> None:
|
||||||
|
cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
CREATE TRIGGER IF NOT EXISTS model_metadata_updated_at
|
||||||
|
AFTER UPDATE
|
||||||
|
ON model_metadata FOR EACH ROW
|
||||||
|
BEGIN
|
||||||
|
UPDATE model_metadata SET updated_at = STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')
|
||||||
|
WHERE id = old.id;
|
||||||
|
END;
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def build_migration_4() -> Migration:
|
||||||
|
"""
|
||||||
|
Build the migration from database version 3 to 4.
|
||||||
|
|
||||||
|
Adds the tables needed to store model metadata and tags.
|
||||||
|
"""
|
||||||
|
migration_4 = Migration(
|
||||||
|
from_version=3,
|
||||||
|
to_version=4,
|
||||||
|
callback=Migration4Callback(),
|
||||||
|
)
|
||||||
|
|
||||||
|
return migration_4
|
@ -23,7 +23,6 @@ from invokeai.backend.model_manager.config import (
|
|||||||
ModelType,
|
ModelType,
|
||||||
)
|
)
|
||||||
from invokeai.backend.model_manager.hash import FastModelHash
|
from invokeai.backend.model_manager.hash import FastModelHash
|
||||||
from invokeai.backend.util.logging import InvokeAILogger
|
|
||||||
|
|
||||||
ModelsValidator = TypeAdapter(AnyModelConfig)
|
ModelsValidator = TypeAdapter(AnyModelConfig)
|
||||||
|
|
||||||
@ -46,10 +45,9 @@ class MigrateModelYamlToDb1:
|
|||||||
logger: Logger
|
logger: Logger
|
||||||
cursor: sqlite3.Cursor
|
cursor: sqlite3.Cursor
|
||||||
|
|
||||||
def __init__(self, cursor: sqlite3.Cursor = None) -> None:
|
def __init__(self, config: InvokeAIAppConfig, logger: Logger, cursor: sqlite3.Cursor = None) -> None:
|
||||||
self.config = InvokeAIAppConfig.get_config()
|
self.config = config
|
||||||
self.config.parse_args()
|
self.logger = logger
|
||||||
self.logger = InvokeAILogger.get_logger()
|
|
||||||
self.cursor = cursor
|
self.cursor = cursor
|
||||||
|
|
||||||
def get_yaml(self) -> DictConfig:
|
def get_yaml(self) -> DictConfig:
|
||||||
|
@ -1,5 +1,4 @@
|
|||||||
{
|
{
|
||||||
"id": "6bfa0b3a-7090-4cd9-ad2d-a4b8662b6e71",
|
|
||||||
"name": "ESRGAN Upscaling with Canny ControlNet",
|
"name": "ESRGAN Upscaling with Canny ControlNet",
|
||||||
"author": "InvokeAI",
|
"author": "InvokeAI",
|
||||||
"description": "Sample workflow for using Upscaling with ControlNet with SD1.5",
|
"description": "Sample workflow for using Upscaling with ControlNet with SD1.5",
|
||||||
@ -77,12 +76,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 256,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1250,
|
"x": 1250,
|
||||||
"y": 1500
|
"y": 1500
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
"id": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
||||||
@ -148,12 +147,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 227,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 700,
|
"x": 700,
|
||||||
"y": 1375
|
"y": 1375
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 193
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "771bdf6a-0813-4099-a5d8-921a138754d4",
|
"id": "771bdf6a-0813-4099-a5d8-921a138754d4",
|
||||||
@ -214,12 +213,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 225,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 375,
|
"x": 375,
|
||||||
"y": 1900
|
"y": 1900
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 189
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
"id": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
||||||
@ -315,12 +314,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 340,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 775,
|
"x": 775,
|
||||||
"y": 1900
|
"y": 1900
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 295
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "1d887701-df21-4966-ae6e-a7d82307d7bd",
|
"id": "1d887701-df21-4966-ae6e-a7d82307d7bd",
|
||||||
@ -416,12 +415,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 340,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1200,
|
"x": 1200,
|
||||||
"y": 1900
|
"y": 1900
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 293
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ca1d020c-89a8-4958-880a-016d28775cfa",
|
"id": "ca1d020c-89a8-4958-880a-016d28775cfa",
|
||||||
@ -434,7 +433,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.1.0",
|
"version": "1.1.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"image": {
|
"image": {
|
||||||
@ -537,12 +536,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 511,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1650,
|
"x": 1650,
|
||||||
"y": 1900
|
"y": 1900
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 451
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
"id": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
||||||
@ -640,12 +639,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1650,
|
"x": 1650,
|
||||||
"y": 1775
|
"y": 1775
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"id": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
@ -658,7 +657,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.5.0",
|
"version": "1.5.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"positive_conditioning": {
|
"positive_conditioning": {
|
||||||
@ -866,12 +865,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 705,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 2128.740065979906,
|
"x": 2128.740065979906,
|
||||||
"y": 1232.6219060454753
|
"y": 1232.6219060454753
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 612
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0",
|
"id": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0",
|
||||||
@ -978,12 +977,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 267,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 2559.4751127537957,
|
"x": 2559.4751127537957,
|
||||||
"y": 1246.6000376741406
|
"y": 1246.6000376741406
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 224
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
"id": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
||||||
@ -1079,12 +1078,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1650,
|
"x": 1650,
|
||||||
"y": 1675
|
"y": 1675
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b",
|
"id": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b",
|
||||||
@ -1137,12 +1136,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 256,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1250,
|
"x": 1250,
|
||||||
"y": 1200
|
"y": 1200
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "eb8f6f8a-c7b1-4914-806e-045ee2717a35",
|
"id": "eb8f6f8a-c7b1-4914-806e-045ee2717a35",
|
||||||
@ -1195,168 +1194,168 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1650,
|
"x": 1650,
|
||||||
"y": 1600
|
"y": 1600
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"edges": [
|
"edges": [
|
||||||
{
|
{
|
||||||
"id": "5ca498a4-c8c8-4580-a396-0c984317205d-f50624ce-82bf-41d0-bdf7-8aab11a80d48-collapsed",
|
"id": "5ca498a4-c8c8-4580-a396-0c984317205d-f50624ce-82bf-41d0-bdf7-8aab11a80d48-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
||||||
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "eb8f6f8a-c7b1-4914-806e-045ee2717a35-f50624ce-82bf-41d0-bdf7-8aab11a80d48-collapsed",
|
"id": "eb8f6f8a-c7b1-4914-806e-045ee2717a35-f50624ce-82bf-41d0-bdf7-8aab11a80d48-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "eb8f6f8a-c7b1-4914-806e-045ee2717a35",
|
"source": "eb8f6f8a-c7b1-4914-806e-045ee2717a35",
|
||||||
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-771bdf6a-0813-4099-a5d8-921a138754d4image-f7564dd2-9539-47f2-ac13-190804461f4eimage",
|
"id": "reactflow__edge-771bdf6a-0813-4099-a5d8-921a138754d4image-f7564dd2-9539-47f2-ac13-190804461f4eimage",
|
||||||
|
"type": "default",
|
||||||
"source": "771bdf6a-0813-4099-a5d8-921a138754d4",
|
"source": "771bdf6a-0813-4099-a5d8-921a138754d4",
|
||||||
"target": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
"target": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-f7564dd2-9539-47f2-ac13-190804461f4eimage-1d887701-df21-4966-ae6e-a7d82307d7bdimage",
|
"id": "reactflow__edge-f7564dd2-9539-47f2-ac13-190804461f4eimage-1d887701-df21-4966-ae6e-a7d82307d7bdimage",
|
||||||
|
"type": "default",
|
||||||
"source": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
"source": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
||||||
"target": "1d887701-df21-4966-ae6e-a7d82307d7bd",
|
"target": "1d887701-df21-4966-ae6e-a7d82307d7bd",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dwidth-f50624ce-82bf-41d0-bdf7-8aab11a80d48width",
|
"id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dwidth-f50624ce-82bf-41d0-bdf7-8aab11a80d48width",
|
||||||
|
"type": "default",
|
||||||
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
||||||
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "width",
|
"sourceHandle": "width",
|
||||||
"targetHandle": "width"
|
"targetHandle": "width"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dheight-f50624ce-82bf-41d0-bdf7-8aab11a80d48height",
|
"id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dheight-f50624ce-82bf-41d0-bdf7-8aab11a80d48height",
|
||||||
|
"type": "default",
|
||||||
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
||||||
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "height",
|
"sourceHandle": "height",
|
||||||
"targetHandle": "height"
|
"targetHandle": "height"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-f50624ce-82bf-41d0-bdf7-8aab11a80d48noise-c3737554-8d87-48ff-a6f8-e71d2867f434noise",
|
"id": "reactflow__edge-f50624ce-82bf-41d0-bdf7-8aab11a80d48noise-c3737554-8d87-48ff-a6f8-e71d2867f434noise",
|
||||||
|
"type": "default",
|
||||||
"source": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
"source": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
||||||
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "noise",
|
"sourceHandle": "noise",
|
||||||
"targetHandle": "noise"
|
"targetHandle": "noise"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dlatents-c3737554-8d87-48ff-a6f8-e71d2867f434latents",
|
"id": "reactflow__edge-5ca498a4-c8c8-4580-a396-0c984317205dlatents-c3737554-8d87-48ff-a6f8-e71d2867f434latents",
|
||||||
|
"type": "default",
|
||||||
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
"source": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
||||||
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "latents",
|
"sourceHandle": "latents",
|
||||||
"targetHandle": "latents"
|
"targetHandle": "latents"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-e8bf67fe-67de-4227-87eb-79e86afdfc74conditioning-c3737554-8d87-48ff-a6f8-e71d2867f434negative_conditioning",
|
"id": "reactflow__edge-e8bf67fe-67de-4227-87eb-79e86afdfc74conditioning-c3737554-8d87-48ff-a6f8-e71d2867f434negative_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "e8bf67fe-67de-4227-87eb-79e86afdfc74",
|
"source": "e8bf67fe-67de-4227-87eb-79e86afdfc74",
|
||||||
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "negative_conditioning"
|
"targetHandle": "negative_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16bconditioning-c3737554-8d87-48ff-a6f8-e71d2867f434positive_conditioning",
|
"id": "reactflow__edge-63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16bconditioning-c3737554-8d87-48ff-a6f8-e71d2867f434positive_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b",
|
"source": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b",
|
||||||
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "positive_conditioning"
|
"targetHandle": "positive_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dclip-63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16bclip",
|
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dclip-63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16bclip",
|
||||||
|
"type": "default",
|
||||||
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
||||||
"target": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b",
|
"target": "63b6ab7e-5b05-4d1b-a3b1-42d8e53ce16b",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dclip-e8bf67fe-67de-4227-87eb-79e86afdfc74clip",
|
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dclip-e8bf67fe-67de-4227-87eb-79e86afdfc74clip",
|
||||||
|
"type": "default",
|
||||||
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
||||||
"target": "e8bf67fe-67de-4227-87eb-79e86afdfc74",
|
"target": "e8bf67fe-67de-4227-87eb-79e86afdfc74",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-1d887701-df21-4966-ae6e-a7d82307d7bdimage-ca1d020c-89a8-4958-880a-016d28775cfaimage",
|
"id": "reactflow__edge-1d887701-df21-4966-ae6e-a7d82307d7bdimage-ca1d020c-89a8-4958-880a-016d28775cfaimage",
|
||||||
|
"type": "default",
|
||||||
"source": "1d887701-df21-4966-ae6e-a7d82307d7bd",
|
"source": "1d887701-df21-4966-ae6e-a7d82307d7bd",
|
||||||
"target": "ca1d020c-89a8-4958-880a-016d28775cfa",
|
"target": "ca1d020c-89a8-4958-880a-016d28775cfa",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ca1d020c-89a8-4958-880a-016d28775cfacontrol-c3737554-8d87-48ff-a6f8-e71d2867f434control",
|
"id": "reactflow__edge-ca1d020c-89a8-4958-880a-016d28775cfacontrol-c3737554-8d87-48ff-a6f8-e71d2867f434control",
|
||||||
|
"type": "default",
|
||||||
"source": "ca1d020c-89a8-4958-880a-016d28775cfa",
|
"source": "ca1d020c-89a8-4958-880a-016d28775cfa",
|
||||||
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "control",
|
"sourceHandle": "control",
|
||||||
"targetHandle": "control"
|
"targetHandle": "control"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c3737554-8d87-48ff-a6f8-e71d2867f434latents-3ed9b2ef-f4ec-40a7-94db-92e63b583ec0latents",
|
"id": "reactflow__edge-c3737554-8d87-48ff-a6f8-e71d2867f434latents-3ed9b2ef-f4ec-40a7-94db-92e63b583ec0latents",
|
||||||
|
"type": "default",
|
||||||
"source": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"source": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
"target": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0",
|
"target": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "latents",
|
"sourceHandle": "latents",
|
||||||
"targetHandle": "latents"
|
"targetHandle": "latents"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dvae-3ed9b2ef-f4ec-40a7-94db-92e63b583ec0vae",
|
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dvae-3ed9b2ef-f4ec-40a7-94db-92e63b583ec0vae",
|
||||||
|
"type": "default",
|
||||||
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
||||||
"target": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0",
|
"target": "3ed9b2ef-f4ec-40a7-94db-92e63b583ec0",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "vae",
|
"sourceHandle": "vae",
|
||||||
"targetHandle": "vae"
|
"targetHandle": "vae"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-f7564dd2-9539-47f2-ac13-190804461f4eimage-5ca498a4-c8c8-4580-a396-0c984317205dimage",
|
"id": "reactflow__edge-f7564dd2-9539-47f2-ac13-190804461f4eimage-5ca498a4-c8c8-4580-a396-0c984317205dimage",
|
||||||
|
"type": "default",
|
||||||
"source": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
"source": "f7564dd2-9539-47f2-ac13-190804461f4e",
|
||||||
"target": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
"target": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dunet-c3737554-8d87-48ff-a6f8-e71d2867f434unet",
|
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dunet-c3737554-8d87-48ff-a6f8-e71d2867f434unet",
|
||||||
|
"type": "default",
|
||||||
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
||||||
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
"target": "c3737554-8d87-48ff-a6f8-e71d2867f434",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "unet",
|
"sourceHandle": "unet",
|
||||||
"targetHandle": "unet"
|
"targetHandle": "unet"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dvae-5ca498a4-c8c8-4580-a396-0c984317205dvae",
|
"id": "reactflow__edge-d8ace142-c05f-4f1d-8982-88dc7473958dvae-5ca498a4-c8c8-4580-a396-0c984317205dvae",
|
||||||
|
"type": "default",
|
||||||
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
"source": "d8ace142-c05f-4f1d-8982-88dc7473958d",
|
||||||
"target": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
"target": "5ca498a4-c8c8-4580-a396-0c984317205d",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "vae",
|
"sourceHandle": "vae",
|
||||||
"targetHandle": "vae"
|
"targetHandle": "vae"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-eb8f6f8a-c7b1-4914-806e-045ee2717a35value-f50624ce-82bf-41d0-bdf7-8aab11a80d48seed",
|
"id": "reactflow__edge-eb8f6f8a-c7b1-4914-806e-045ee2717a35value-f50624ce-82bf-41d0-bdf7-8aab11a80d48seed",
|
||||||
|
"type": "default",
|
||||||
"source": "eb8f6f8a-c7b1-4914-806e-045ee2717a35",
|
"source": "eb8f6f8a-c7b1-4914-806e-045ee2717a35",
|
||||||
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
"target": "f50624ce-82bf-41d0-bdf7-8aab11a80d48",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "seed"
|
"targetHandle": "seed"
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,4 @@
|
|||||||
{
|
{
|
||||||
"id": "1e385b84-86f8-452e-9697-9e5abed20518",
|
|
||||||
"name": "Multi ControlNet (Canny & Depth)",
|
"name": "Multi ControlNet (Canny & Depth)",
|
||||||
"author": "InvokeAI",
|
"author": "InvokeAI",
|
||||||
"description": "A sample workflow using canny & depth ControlNets to guide the generation process. ",
|
"description": "A sample workflow using canny & depth ControlNets to guide the generation process. ",
|
||||||
@ -93,12 +92,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 225,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3625,
|
"x": 3625,
|
||||||
"y": -75
|
"y": -75
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 189
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
|
"id": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
|
||||||
@ -111,7 +110,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.1.0",
|
"version": "1.1.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"image": {
|
"image": {
|
||||||
@ -214,12 +213,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 511,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4477.604342844504,
|
"x": 4477.604342844504,
|
||||||
"y": -49.39005411272677
|
"y": -49.39005411272677
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 451
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
|
"id": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
|
||||||
@ -272,12 +271,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 256,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4075,
|
"x": 4075,
|
||||||
"y": -825
|
"y": -825
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
"id": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
||||||
@ -343,12 +342,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 227,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3600,
|
"x": 3600,
|
||||||
"y": -1000
|
"y": -1000
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 193
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "7ce68934-3419-42d4-ac70-82cfc9397306",
|
"id": "7ce68934-3419-42d4-ac70-82cfc9397306",
|
||||||
@ -401,12 +400,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 256,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4075,
|
"x": 4075,
|
||||||
"y": -1125
|
"y": -1125
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "d204d184-f209-4fae-a0a1-d152800844e1",
|
"id": "d204d184-f209-4fae-a0a1-d152800844e1",
|
||||||
@ -419,7 +418,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.1.0",
|
"version": "1.1.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"image": {
|
"image": {
|
||||||
@ -522,12 +521,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 511,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4479.68542130465,
|
"x": 4479.68542130465,
|
||||||
"y": -618.4221638099414
|
"y": -618.4221638099414
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 451
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "c4b23e64-7986-40c4-9cad-46327b12e204",
|
"id": "c4b23e64-7986-40c4-9cad-46327b12e204",
|
||||||
@ -588,12 +587,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 225,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3625,
|
"x": 3625,
|
||||||
"y": -425
|
"y": -425
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 189
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
"id": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
||||||
@ -633,12 +632,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 104,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4875,
|
"x": 4875,
|
||||||
"y": -575
|
"y": -575
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 87
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "018b1214-c2af-43a7-9910-fb687c6726d7",
|
"id": "018b1214-c2af-43a7-9910-fb687c6726d7",
|
||||||
@ -734,12 +733,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 340,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4100,
|
"x": 4100,
|
||||||
"y": -75
|
"y": -75
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 293
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "c826ba5e-9676-4475-b260-07b85e88753c",
|
"id": "c826ba5e-9676-4475-b260-07b85e88753c",
|
||||||
@ -835,12 +834,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 340,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4095.757337055795,
|
"x": 4095.757337055795,
|
||||||
"y": -455.63440891935863
|
"y": -455.63440891935863
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 293
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "9db25398-c869-4a63-8815-c6559341ef12",
|
"id": "9db25398-c869-4a63-8815-c6559341ef12",
|
||||||
@ -947,12 +946,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 267,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 5675,
|
"x": 5675,
|
||||||
"y": -825
|
"y": -825
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 224
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
"id": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
||||||
@ -965,7 +964,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.5.0",
|
"version": "1.5.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"positive_conditioning": {
|
"positive_conditioning": {
|
||||||
@ -1173,12 +1172,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 705,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 5274.672987098195,
|
"x": 5274.672987098195,
|
||||||
"y": -823.0752416664332
|
"y": -823.0752416664332
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 612
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b",
|
"id": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b",
|
||||||
@ -1275,12 +1274,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4875,
|
"x": 4875,
|
||||||
"y": -675
|
"y": -675
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce",
|
"id": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce",
|
||||||
@ -1333,146 +1332,146 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4875,
|
"x": 4875,
|
||||||
"y": -750
|
"y": -750
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"edges": [
|
"edges": [
|
||||||
{
|
{
|
||||||
"id": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce-2e77a0a1-db6a-47a2-a8bf-1e003be6423b-collapsed",
|
"id": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce-2e77a0a1-db6a-47a2-a8bf-1e003be6423b-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce",
|
"source": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce",
|
||||||
"target": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b",
|
"target": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-7ce68934-3419-42d4-ac70-82cfc9397306clip",
|
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-7ce68934-3419-42d4-ac70-82cfc9397306clip",
|
||||||
|
"type": "default",
|
||||||
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
||||||
"target": "7ce68934-3419-42d4-ac70-82cfc9397306",
|
"target": "7ce68934-3419-42d4-ac70-82cfc9397306",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-273e3f96-49ea-4dc5-9d5b-9660390f14e1clip",
|
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9clip-273e3f96-49ea-4dc5-9d5b-9660390f14e1clip",
|
||||||
|
"type": "default",
|
||||||
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
||||||
"target": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
|
"target": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-a33199c2-8340-401e-b8a2-42ffa875fc1ccontrol-ca4d5059-8bfb-447f-b415-da0faba5a143item",
|
"id": "reactflow__edge-a33199c2-8340-401e-b8a2-42ffa875fc1ccontrol-ca4d5059-8bfb-447f-b415-da0faba5a143item",
|
||||||
|
"type": "default",
|
||||||
"source": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
|
"source": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
|
||||||
"target": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
"target": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "control",
|
"sourceHandle": "control",
|
||||||
"targetHandle": "item"
|
"targetHandle": "item"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d204d184-f209-4fae-a0a1-d152800844e1control-ca4d5059-8bfb-447f-b415-da0faba5a143item",
|
"id": "reactflow__edge-d204d184-f209-4fae-a0a1-d152800844e1control-ca4d5059-8bfb-447f-b415-da0faba5a143item",
|
||||||
|
"type": "default",
|
||||||
"source": "d204d184-f209-4fae-a0a1-d152800844e1",
|
"source": "d204d184-f209-4fae-a0a1-d152800844e1",
|
||||||
"target": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
"target": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "control",
|
"sourceHandle": "control",
|
||||||
"targetHandle": "item"
|
"targetHandle": "item"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-8e860e51-5045-456e-bf04-9a62a2a5c49eimage-018b1214-c2af-43a7-9910-fb687c6726d7image",
|
"id": "reactflow__edge-8e860e51-5045-456e-bf04-9a62a2a5c49eimage-018b1214-c2af-43a7-9910-fb687c6726d7image",
|
||||||
|
"type": "default",
|
||||||
"source": "8e860e51-5045-456e-bf04-9a62a2a5c49e",
|
"source": "8e860e51-5045-456e-bf04-9a62a2a5c49e",
|
||||||
"target": "018b1214-c2af-43a7-9910-fb687c6726d7",
|
"target": "018b1214-c2af-43a7-9910-fb687c6726d7",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-018b1214-c2af-43a7-9910-fb687c6726d7image-a33199c2-8340-401e-b8a2-42ffa875fc1cimage",
|
"id": "reactflow__edge-018b1214-c2af-43a7-9910-fb687c6726d7image-a33199c2-8340-401e-b8a2-42ffa875fc1cimage",
|
||||||
|
"type": "default",
|
||||||
"source": "018b1214-c2af-43a7-9910-fb687c6726d7",
|
"source": "018b1214-c2af-43a7-9910-fb687c6726d7",
|
||||||
"target": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
|
"target": "a33199c2-8340-401e-b8a2-42ffa875fc1c",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c4b23e64-7986-40c4-9cad-46327b12e204image-c826ba5e-9676-4475-b260-07b85e88753cimage",
|
"id": "reactflow__edge-c4b23e64-7986-40c4-9cad-46327b12e204image-c826ba5e-9676-4475-b260-07b85e88753cimage",
|
||||||
|
"type": "default",
|
||||||
"source": "c4b23e64-7986-40c4-9cad-46327b12e204",
|
"source": "c4b23e64-7986-40c4-9cad-46327b12e204",
|
||||||
"target": "c826ba5e-9676-4475-b260-07b85e88753c",
|
"target": "c826ba5e-9676-4475-b260-07b85e88753c",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c826ba5e-9676-4475-b260-07b85e88753cimage-d204d184-f209-4fae-a0a1-d152800844e1image",
|
"id": "reactflow__edge-c826ba5e-9676-4475-b260-07b85e88753cimage-d204d184-f209-4fae-a0a1-d152800844e1image",
|
||||||
|
"type": "default",
|
||||||
"source": "c826ba5e-9676-4475-b260-07b85e88753c",
|
"source": "c826ba5e-9676-4475-b260-07b85e88753c",
|
||||||
"target": "d204d184-f209-4fae-a0a1-d152800844e1",
|
"target": "d204d184-f209-4fae-a0a1-d152800844e1",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "image",
|
"sourceHandle": "image",
|
||||||
"targetHandle": "image"
|
"targetHandle": "image"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9vae-9db25398-c869-4a63-8815-c6559341ef12vae",
|
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9vae-9db25398-c869-4a63-8815-c6559341ef12vae",
|
||||||
|
"type": "default",
|
||||||
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
||||||
"target": "9db25398-c869-4a63-8815-c6559341ef12",
|
"target": "9db25398-c869-4a63-8815-c6559341ef12",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "vae",
|
"sourceHandle": "vae",
|
||||||
"targetHandle": "vae"
|
"targetHandle": "vae"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ac481b7f-08bf-4a9d-9e0c-3a82ea5243celatents-9db25398-c869-4a63-8815-c6559341ef12latents",
|
"id": "reactflow__edge-ac481b7f-08bf-4a9d-9e0c-3a82ea5243celatents-9db25398-c869-4a63-8815-c6559341ef12latents",
|
||||||
|
"type": "default",
|
||||||
"source": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
"source": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
||||||
"target": "9db25398-c869-4a63-8815-c6559341ef12",
|
"target": "9db25398-c869-4a63-8815-c6559341ef12",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "latents",
|
"sourceHandle": "latents",
|
||||||
"targetHandle": "latents"
|
"targetHandle": "latents"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ca4d5059-8bfb-447f-b415-da0faba5a143collection-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cecontrol",
|
"id": "reactflow__edge-ca4d5059-8bfb-447f-b415-da0faba5a143collection-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cecontrol",
|
||||||
|
"type": "default",
|
||||||
"source": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
"source": "ca4d5059-8bfb-447f-b415-da0faba5a143",
|
||||||
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "collection",
|
"sourceHandle": "collection",
|
||||||
"targetHandle": "control"
|
"targetHandle": "control"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9unet-ac481b7f-08bf-4a9d-9e0c-3a82ea5243ceunet",
|
"id": "reactflow__edge-54486974-835b-4d81-8f82-05f9f32ce9e9unet-ac481b7f-08bf-4a9d-9e0c-3a82ea5243ceunet",
|
||||||
|
"type": "default",
|
||||||
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
"source": "54486974-835b-4d81-8f82-05f9f32ce9e9",
|
||||||
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "unet",
|
"sourceHandle": "unet",
|
||||||
"targetHandle": "unet"
|
"targetHandle": "unet"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-273e3f96-49ea-4dc5-9d5b-9660390f14e1conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cenegative_conditioning",
|
"id": "reactflow__edge-273e3f96-49ea-4dc5-9d5b-9660390f14e1conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cenegative_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
|
"source": "273e3f96-49ea-4dc5-9d5b-9660390f14e1",
|
||||||
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "negative_conditioning"
|
"targetHandle": "negative_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-7ce68934-3419-42d4-ac70-82cfc9397306conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cepositive_conditioning",
|
"id": "reactflow__edge-7ce68934-3419-42d4-ac70-82cfc9397306conditioning-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cepositive_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "7ce68934-3419-42d4-ac70-82cfc9397306",
|
"source": "7ce68934-3419-42d4-ac70-82cfc9397306",
|
||||||
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "positive_conditioning"
|
"targetHandle": "positive_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-2e77a0a1-db6a-47a2-a8bf-1e003be6423bnoise-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cenoise",
|
"id": "reactflow__edge-2e77a0a1-db6a-47a2-a8bf-1e003be6423bnoise-ac481b7f-08bf-4a9d-9e0c-3a82ea5243cenoise",
|
||||||
|
"type": "default",
|
||||||
"source": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b",
|
"source": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b",
|
||||||
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
"target": "ac481b7f-08bf-4a9d-9e0c-3a82ea5243ce",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "noise",
|
"sourceHandle": "noise",
|
||||||
"targetHandle": "noise"
|
"targetHandle": "noise"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-8b260b4d-3fd6-44d4-b1be-9f0e43c628cevalue-2e77a0a1-db6a-47a2-a8bf-1e003be6423bseed",
|
"id": "reactflow__edge-8b260b4d-3fd6-44d4-b1be-9f0e43c628cevalue-2e77a0a1-db6a-47a2-a8bf-1e003be6423bseed",
|
||||||
|
"type": "default",
|
||||||
"source": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce",
|
"source": "8b260b4d-3fd6-44d4-b1be-9f0e43c628ce",
|
||||||
"target": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b",
|
"target": "2e77a0a1-db6a-47a2-a8bf-1e003be6423b",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "seed"
|
"targetHandle": "seed"
|
||||||
}
|
}
|
||||||
|
@ -20,7 +20,6 @@
|
|||||||
"category": "default",
|
"category": "default",
|
||||||
"version": "2.0.0"
|
"version": "2.0.0"
|
||||||
},
|
},
|
||||||
"id": "d1609af5-eb0a-4f73-b573-c9af96a8d6bf",
|
|
||||||
"nodes": [
|
"nodes": [
|
||||||
{
|
{
|
||||||
"id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
"id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
||||||
@ -73,12 +72,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 925,
|
"x": 925,
|
||||||
"y": -200
|
"y": -200
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
"id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
||||||
@ -168,12 +167,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 580,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 475,
|
"x": 475,
|
||||||
"y": -400
|
"y": -400
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 506
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
"id": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||||
@ -233,12 +232,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 925,
|
"x": 925,
|
||||||
"y": -400
|
"y": -400
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
"id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||||
@ -304,12 +303,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 227,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 0,
|
"x": 0,
|
||||||
"y": -375
|
"y": -375
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 193
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
"id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||||
@ -362,12 +361,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 925,
|
"x": 925,
|
||||||
"y": -275
|
"y": -275
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
"id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
||||||
@ -465,12 +464,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 925,
|
"x": 925,
|
||||||
"y": 25
|
"y": 25
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
||||||
@ -524,12 +523,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 925,
|
"x": 925,
|
||||||
"y": -50
|
"y": -50
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
"id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
||||||
@ -636,12 +635,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 267,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 2037.861329274915,
|
"x": 2037.861329274915,
|
||||||
"y": -329.8393457509562
|
"y": -329.8393457509562
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 224
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
"id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||||
@ -654,7 +653,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.5.0",
|
"version": "1.5.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"positive_conditioning": {
|
"positive_conditioning": {
|
||||||
@ -862,112 +861,112 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 705,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1570.9941088179146,
|
"x": 1570.9941088179146,
|
||||||
"y": -407.6505491604564
|
"y": -407.6505491604564
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 612
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"edges": [
|
"edges": [
|
||||||
{
|
{
|
||||||
"id": "1b89067c-3f6b-42c8-991f-e3055789b251-fc9d0e35-a6de-4a19-84e1-c72497c823f6-collapsed",
|
"id": "1b89067c-3f6b-42c8-991f-e3055789b251-fc9d0e35-a6de-4a19-84e1-c72497c823f6-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||||
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77-collapsed",
|
"id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
||||||
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-1b7e0df8-8589-4915-a4ea-c0088f15d642collection-1b89067c-3f6b-42c8-991f-e3055789b251collection",
|
"id": "reactflow__edge-1b7e0df8-8589-4915-a4ea-c0088f15d642collection-1b89067c-3f6b-42c8-991f-e3055789b251collection",
|
||||||
|
"type": "default",
|
||||||
"source": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
"source": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
|
||||||
"target": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
"target": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "collection",
|
"sourceHandle": "collection",
|
||||||
"targetHandle": "collection"
|
"targetHandle": "collection"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-fc9d0e35-a6de-4a19-84e1-c72497c823f6clip",
|
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-fc9d0e35-a6de-4a19-84e1-c72497c823f6clip",
|
||||||
|
"type": "default",
|
||||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||||
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-1b89067c-3f6b-42c8-991f-e3055789b251item-fc9d0e35-a6de-4a19-84e1-c72497c823f6prompt",
|
"id": "reactflow__edge-1b89067c-3f6b-42c8-991f-e3055789b251item-fc9d0e35-a6de-4a19-84e1-c72497c823f6prompt",
|
||||||
|
"type": "default",
|
||||||
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
"source": "1b89067c-3f6b-42c8-991f-e3055789b251",
|
||||||
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
"target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "item",
|
"sourceHandle": "item",
|
||||||
"targetHandle": "prompt"
|
"targetHandle": "prompt"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-c2eaf1ba-5708-4679-9e15-945b8b432692clip",
|
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-c2eaf1ba-5708-4679-9e15-945b8b432692clip",
|
||||||
|
"type": "default",
|
||||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||||
"target": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
"target": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5value-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77seed",
|
"id": "reactflow__edge-dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5value-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77seed",
|
||||||
|
"type": "default",
|
||||||
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
"source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
|
||||||
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
"target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "seed"
|
"targetHandle": "seed"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-fc9d0e35-a6de-4a19-84e1-c72497c823f6conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5epositive_conditioning",
|
"id": "reactflow__edge-fc9d0e35-a6de-4a19-84e1-c72497c823f6conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5epositive_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
"source": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
|
||||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "positive_conditioning"
|
"targetHandle": "positive_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c2eaf1ba-5708-4679-9e15-945b8b432692conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enegative_conditioning",
|
"id": "reactflow__edge-c2eaf1ba-5708-4679-9e15-945b8b432692conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enegative_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
"source": "c2eaf1ba-5708-4679-9e15-945b8b432692",
|
||||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "negative_conditioning"
|
"targetHandle": "negative_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77noise-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enoise",
|
"id": "reactflow__edge-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77noise-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enoise",
|
||||||
|
"type": "default",
|
||||||
"source": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
"source": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
|
||||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "noise",
|
"sourceHandle": "noise",
|
||||||
"targetHandle": "noise"
|
"targetHandle": "noise"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426unet-2fb1577f-0a56-4f12-8711-8afcaaaf1d5eunet",
|
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426unet-2fb1577f-0a56-4f12-8711-8afcaaaf1d5eunet",
|
||||||
|
"type": "default",
|
||||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||||
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
"target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "unet",
|
"sourceHandle": "unet",
|
||||||
"targetHandle": "unet"
|
"targetHandle": "unet"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-2fb1577f-0a56-4f12-8711-8afcaaaf1d5elatents-491ec988-3c77-4c37-af8a-39a0c4e7a2a1latents",
|
"id": "reactflow__edge-2fb1577f-0a56-4f12-8711-8afcaaaf1d5elatents-491ec988-3c77-4c37-af8a-39a0c4e7a2a1latents",
|
||||||
|
"type": "default",
|
||||||
"source": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
"source": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
|
||||||
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "latents",
|
"sourceHandle": "latents",
|
||||||
"targetHandle": "latents"
|
"targetHandle": "latents"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426vae-491ec988-3c77-4c37-af8a-39a0c4e7a2a1vae",
|
"id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426vae-491ec988-3c77-4c37-af8a-39a0c4e7a2a1vae",
|
||||||
|
"type": "default",
|
||||||
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
"source": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
|
||||||
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
"target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "vae",
|
"sourceHandle": "vae",
|
||||||
"targetHandle": "vae"
|
"targetHandle": "vae"
|
||||||
}
|
}
|
||||||
|
@ -80,12 +80,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 750,
|
"x": 750,
|
||||||
"y": -225
|
"y": -225
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "719dabe8-8297-4749-aea1-37be301cd425",
|
"id": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||||
@ -126,12 +126,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 258,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 750,
|
"x": 750,
|
||||||
"y": -125
|
"y": -125
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
"id": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||||
@ -279,12 +279,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 750,
|
"x": 750,
|
||||||
"y": 200
|
"y": 200
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
|
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||||
@ -382,12 +382,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 388,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 375,
|
"x": 375,
|
||||||
"y": 0
|
"y": 0
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 336
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||||
@ -441,12 +441,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 375,
|
"x": 375,
|
||||||
"y": -50
|
"y": -50
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
"id": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||||
@ -471,8 +471,7 @@
|
|||||||
"isCollection": false,
|
"isCollection": false,
|
||||||
"isCollectionOrScalar": false,
|
"isCollectionOrScalar": false,
|
||||||
"name": "SDXLMainModelField"
|
"name": "SDXLMainModelField"
|
||||||
},
|
}
|
||||||
"value": null
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"outputs": {
|
"outputs": {
|
||||||
@ -518,12 +517,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 257,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 375,
|
"x": 375,
|
||||||
"y": -500
|
"y": -500
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
"id": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||||
@ -671,12 +670,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 750,
|
"x": 750,
|
||||||
"y": -175
|
"y": -175
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
"id": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||||
@ -783,12 +782,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 266,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1475,
|
"x": 1475,
|
||||||
"y": -500
|
"y": -500
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 224
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
"id": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||||
@ -801,7 +800,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.5.0",
|
"version": "1.5.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"positive_conditioning": {
|
"positive_conditioning": {
|
||||||
@ -1009,12 +1008,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 702,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1125,
|
"x": 1125,
|
||||||
"y": -500
|
"y": -500
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 612
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
|
"id": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
|
||||||
@ -1038,8 +1037,7 @@
|
|||||||
"isCollection": false,
|
"isCollection": false,
|
||||||
"isCollectionOrScalar": false,
|
"isCollectionOrScalar": false,
|
||||||
"name": "VAEModelField"
|
"name": "VAEModelField"
|
||||||
},
|
}
|
||||||
"value": null
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"outputs": {
|
"outputs": {
|
||||||
@ -1055,12 +1053,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 161,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 375,
|
"x": 375,
|
||||||
"y": -225
|
"y": -225
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 139
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
"id": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||||
@ -1101,12 +1099,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 258,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 750,
|
"x": 750,
|
||||||
"y": -500
|
"y": -500
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||||
@ -1159,162 +1157,162 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 750,
|
"x": 750,
|
||||||
"y": 150
|
"y": 150
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"edges": [
|
"edges": [
|
||||||
{
|
{
|
||||||
"id": "3774ec24-a69e-4254-864c-097d07a6256f-faf965a4-7530-427b-b1f3-4ba6505c2a08-collapsed",
|
"id": "3774ec24-a69e-4254-864c-097d07a6256f-faf965a4-7530-427b-b1f3-4ba6505c2a08-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
|
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204-collapsed",
|
"id": "ad8fa655-3a76-43d0-9c02-4d7644dea650-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
||||||
|
"type": "default",
|
||||||
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||||
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "seed"
|
"targetHandle": "seed"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-faf965a4-7530-427b-b1f3-4ba6505c2a08clip",
|
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-faf965a4-7530-427b-b1f3-4ba6505c2a08clip",
|
||||||
|
"type": "default",
|
||||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-faf965a4-7530-427b-b1f3-4ba6505c2a08clip2",
|
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-faf965a4-7530-427b-b1f3-4ba6505c2a08clip2",
|
||||||
|
"type": "default",
|
||||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip2",
|
"sourceHandle": "clip2",
|
||||||
"targetHandle": "clip2"
|
"targetHandle": "clip2"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip",
|
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip",
|
||||||
|
"type": "default",
|
||||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip2",
|
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22clip2-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204clip2",
|
||||||
|
"type": "default",
|
||||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip2",
|
"sourceHandle": "clip2",
|
||||||
"targetHandle": "clip2"
|
"targetHandle": "clip2"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbunet",
|
"id": "reactflow__edge-30d3289c-773c-4152-a9d2-bd8a99c8fd22unet-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbunet",
|
||||||
|
"type": "default",
|
||||||
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
"source": "30d3289c-773c-4152-a9d2-bd8a99c8fd22",
|
||||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "unet",
|
"sourceHandle": "unet",
|
||||||
"targetHandle": "unet"
|
"targetHandle": "unet"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbpositive_conditioning",
|
"id": "reactflow__edge-faf965a4-7530-427b-b1f3-4ba6505c2a08conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbpositive_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
"source": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "positive_conditioning"
|
"targetHandle": "positive_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnegative_conditioning",
|
"id": "reactflow__edge-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204conditioning-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnegative_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
"source": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "negative_conditioning"
|
"targetHandle": "negative_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnoise",
|
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfbnoise",
|
||||||
|
"type": "default",
|
||||||
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||||
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
"target": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "noise",
|
"sourceHandle": "noise",
|
||||||
"targetHandle": "noise"
|
"targetHandle": "noise"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfblatents-63e91020-83b2-4f35-b174-ad9692aabb48latents",
|
"id": "reactflow__edge-50a36525-3c0a-4cc5-977c-e4bfc3fd6dfblatents-63e91020-83b2-4f35-b174-ad9692aabb48latents",
|
||||||
|
"type": "default",
|
||||||
"source": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
"source": "50a36525-3c0a-4cc5-977c-e4bfc3fd6dfb",
|
||||||
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "latents",
|
"sourceHandle": "latents",
|
||||||
"targetHandle": "latents"
|
"targetHandle": "latents"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-0093692f-9cf4-454d-a5b8-62f0e3eb3bb8vae-63e91020-83b2-4f35-b174-ad9692aabb48vae",
|
"id": "reactflow__edge-0093692f-9cf4-454d-a5b8-62f0e3eb3bb8vae-63e91020-83b2-4f35-b174-ad9692aabb48vae",
|
||||||
|
"type": "default",
|
||||||
"source": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
|
"source": "0093692f-9cf4-454d-a5b8-62f0e3eb3bb8",
|
||||||
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
"target": "63e91020-83b2-4f35-b174-ad9692aabb48",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "vae",
|
"sourceHandle": "vae",
|
||||||
"targetHandle": "vae"
|
"targetHandle": "vae"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-faf965a4-7530-427b-b1f3-4ba6505c2a08prompt",
|
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-faf965a4-7530-427b-b1f3-4ba6505c2a08prompt",
|
||||||
|
"type": "default",
|
||||||
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "prompt"
|
"targetHandle": "prompt"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204prompt",
|
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204prompt",
|
||||||
|
"type": "default",
|
||||||
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "prompt"
|
"targetHandle": "prompt"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-ad8fa655-3a76-43d0-9c02-4d7644dea650string_left",
|
"id": "reactflow__edge-719dabe8-8297-4749-aea1-37be301cd425value-ad8fa655-3a76-43d0-9c02-4d7644dea650string_left",
|
||||||
|
"type": "default",
|
||||||
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
"source": "719dabe8-8297-4749-aea1-37be301cd425",
|
||||||
"target": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
"target": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "string_left"
|
"targetHandle": "string_left"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ad8fa655-3a76-43d0-9c02-4d7644dea650value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204style",
|
"id": "reactflow__edge-ad8fa655-3a76-43d0-9c02-4d7644dea650value-3193ad09-a7c2-4bf4-a3a9-1c61cc33a204style",
|
||||||
|
"type": "default",
|
||||||
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
"source": "ad8fa655-3a76-43d0-9c02-4d7644dea650",
|
||||||
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
"target": "3193ad09-a7c2-4bf4-a3a9-1c61cc33a204",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "style"
|
"targetHandle": "style"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-3774ec24-a69e-4254-864c-097d07a6256fstring_left",
|
"id": "reactflow__edge-ade2c0d3-0384-4157-b39b-29ce429cfa15value-3774ec24-a69e-4254-864c-097d07a6256fstring_left",
|
||||||
|
"type": "default",
|
||||||
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
"source": "ade2c0d3-0384-4157-b39b-29ce429cfa15",
|
||||||
"target": "3774ec24-a69e-4254-864c-097d07a6256f",
|
"target": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "string_left"
|
"targetHandle": "string_left"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-3774ec24-a69e-4254-864c-097d07a6256fvalue-faf965a4-7530-427b-b1f3-4ba6505c2a08style",
|
"id": "reactflow__edge-3774ec24-a69e-4254-864c-097d07a6256fvalue-faf965a4-7530-427b-b1f3-4ba6505c2a08style",
|
||||||
|
"type": "default",
|
||||||
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
|
"source": "3774ec24-a69e-4254-864c-097d07a6256f",
|
||||||
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
"target": "faf965a4-7530-427b-b1f3-4ba6505c2a08",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "style"
|
"targetHandle": "style"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
@ -84,12 +84,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 259,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1000,
|
"x": 1000,
|
||||||
"y": 350
|
"y": 350
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
|
"id": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||||
@ -187,12 +187,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 388,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 600,
|
"x": 600,
|
||||||
"y": 325
|
"y": 325
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 388
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
"id": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||||
@ -258,12 +258,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 226,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 600,
|
"x": 600,
|
||||||
"y": 25
|
"y": 25
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 193
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
"id": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||||
@ -316,12 +316,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 259,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1000,
|
"x": 1000,
|
||||||
"y": 25
|
"y": 25
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
"id": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||||
@ -375,12 +375,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 600,
|
"x": 600,
|
||||||
"y": 275
|
"y": 275
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 32
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
"id": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||||
@ -393,7 +393,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.5.0",
|
"version": "1.5.1",
|
||||||
"nodePack": "invokeai",
|
"nodePack": "invokeai",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"positive_conditioning": {
|
"positive_conditioning": {
|
||||||
@ -601,12 +601,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 703,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1400,
|
"x": 1400,
|
||||||
"y": 25
|
"y": 25
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 612
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
"id": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||||
@ -713,86 +713,86 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 266,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 1800,
|
"x": 1800,
|
||||||
"y": 25
|
"y": 25
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 224
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"edges": [
|
"edges": [
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
"id": "reactflow__edge-ea94bc37-d995-4a83-aa99-4af42479f2f2value-55705012-79b9-4aac-9f26-c0b10309785bseed",
|
||||||
|
"type": "default",
|
||||||
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
"source": "ea94bc37-d995-4a83-aa99-4af42479f2f2",
|
||||||
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
"target": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "seed"
|
"targetHandle": "seed"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-7d8bf987-284f-413a-b2fd-d825445a5d6cclip",
|
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-7d8bf987-284f-413a-b2fd-d825445a5d6cclip",
|
||||||
|
"type": "default",
|
||||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||||
"target": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
"target": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-93dc02a4-d05b-48ed-b99c-c9b616af3402clip",
|
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8clip-93dc02a4-d05b-48ed-b99c-c9b616af3402clip",
|
||||||
|
"type": "default",
|
||||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||||
"target": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
"target": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise",
|
"id": "reactflow__edge-55705012-79b9-4aac-9f26-c0b10309785bnoise-eea2702a-19fb-45b5-9d75-56b4211ec03cnoise",
|
||||||
|
"type": "default",
|
||||||
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
"source": "55705012-79b9-4aac-9f26-c0b10309785b",
|
||||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "noise",
|
"sourceHandle": "noise",
|
||||||
"targetHandle": "noise"
|
"targetHandle": "noise"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning",
|
"id": "reactflow__edge-7d8bf987-284f-413a-b2fd-d825445a5d6cconditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cpositive_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
"source": "7d8bf987-284f-413a-b2fd-d825445a5d6c",
|
||||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "positive_conditioning"
|
"targetHandle": "positive_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning",
|
"id": "reactflow__edge-93dc02a4-d05b-48ed-b99c-c9b616af3402conditioning-eea2702a-19fb-45b5-9d75-56b4211ec03cnegative_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
"source": "93dc02a4-d05b-48ed-b99c-c9b616af3402",
|
||||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "negative_conditioning"
|
"targetHandle": "negative_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet",
|
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8unet-eea2702a-19fb-45b5-9d75-56b4211ec03cunet",
|
||||||
|
"type": "default",
|
||||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||||
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
"target": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "unet",
|
"sourceHandle": "unet",
|
||||||
"targetHandle": "unet"
|
"targetHandle": "unet"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents",
|
"id": "reactflow__edge-eea2702a-19fb-45b5-9d75-56b4211ec03clatents-58c957f5-0d01-41fc-a803-b2bbf0413d4flatents",
|
||||||
|
"type": "default",
|
||||||
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
"source": "eea2702a-19fb-45b5-9d75-56b4211ec03c",
|
||||||
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "latents",
|
"sourceHandle": "latents",
|
||||||
"targetHandle": "latents"
|
"targetHandle": "latents"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae",
|
"id": "reactflow__edge-c8d55139-f380-4695-b7f2-8b3d1e1e3db8vae-58c957f5-0d01-41fc-a803-b2bbf0413d4fvae",
|
||||||
|
"type": "default",
|
||||||
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
"source": "c8d55139-f380-4695-b7f2-8b3d1e1e3db8",
|
||||||
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
"target": "58c957f5-0d01-41fc-a803-b2bbf0413d4f",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "vae",
|
"sourceHandle": "vae",
|
||||||
"targetHandle": "vae"
|
"targetHandle": "vae"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
@ -25,10 +25,9 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"meta": {
|
"meta": {
|
||||||
"version": "2.0.0",
|
"category": "default",
|
||||||
"category": "default"
|
"version": "2.0.0"
|
||||||
},
|
},
|
||||||
"id": "a9d70c39-4cdd-4176-9942-8ff3fe32d3b1",
|
|
||||||
"nodes": [
|
"nodes": [
|
||||||
{
|
{
|
||||||
"id": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
"id": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
||||||
@ -80,12 +79,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 256,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3425,
|
"x": 3425,
|
||||||
"y": -300
|
"y": -300
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
"id": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||||
@ -150,12 +149,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 227,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 2500,
|
"x": 2500,
|
||||||
"y": -600
|
"y": -600
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 193
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
"id": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||||
@ -243,12 +242,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 252,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 2975,
|
"x": 2975,
|
||||||
"y": -600
|
"y": -600
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 218
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
"id": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||||
@ -300,12 +299,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 256,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3425,
|
"x": 3425,
|
||||||
"y": -575
|
"y": -575
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 219
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
"id": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||||
@ -318,7 +317,7 @@
|
|||||||
"notes": "",
|
"notes": "",
|
||||||
"isIntermediate": true,
|
"isIntermediate": true,
|
||||||
"useCache": true,
|
"useCache": true,
|
||||||
"version": "1.5.0",
|
"version": "1.5.1",
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"positive_conditioning": {
|
"positive_conditioning": {
|
||||||
"id": "025ff44b-c4c6-4339-91b4-5f461e2cadc5",
|
"id": "025ff44b-c4c6-4339-91b4-5f461e2cadc5",
|
||||||
@ -525,12 +524,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 705,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3975,
|
"x": 3975,
|
||||||
"y": -575
|
"y": -575
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 612
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
"id": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
||||||
@ -627,12 +626,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3425,
|
"x": 3425,
|
||||||
"y": 75
|
"y": 75
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
||||||
@ -685,12 +684,12 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 32,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 3425,
|
"x": 3425,
|
||||||
"y": 0
|
"y": 0
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 24
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
"id": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
||||||
@ -796,106 +795,106 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"width": 320,
|
|
||||||
"height": 267,
|
|
||||||
"position": {
|
"position": {
|
||||||
"x": 4450,
|
"x": 4450,
|
||||||
"y": -550
|
"y": -550
|
||||||
}
|
},
|
||||||
|
"width": 320,
|
||||||
|
"height": 224
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"edges": [
|
"edges": [
|
||||||
{
|
{
|
||||||
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953-ea18915f-2c5b-4569-b725-8e9e9122e8d3-collapsed",
|
"id": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953-ea18915f-2c5b-4569-b725-8e9e9122e8d3-collapsed",
|
||||||
|
"type": "collapsed",
|
||||||
"source": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
"source": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
||||||
"target": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
"target": "ea18915f-2c5b-4569-b725-8e9e9122e8d3"
|
||||||
"type": "collapsed"
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818clip-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip",
|
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818clip-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip",
|
||||||
|
"type": "default",
|
||||||
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||||
"target": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
"target": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip-c3fa6872-2599-4a82-a596-b3446a66cf8bclip",
|
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip-c3fa6872-2599-4a82-a596-b3446a66cf8bclip",
|
||||||
|
"type": "default",
|
||||||
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||||
"target": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
"target": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818unet-c41e705b-f2e3-4d1a-83c4-e34bb9344966unet",
|
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818unet-c41e705b-f2e3-4d1a-83c4-e34bb9344966unet",
|
||||||
|
"type": "default",
|
||||||
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||||
"target": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
"target": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "unet",
|
"sourceHandle": "unet",
|
||||||
"targetHandle": "unet"
|
"targetHandle": "unet"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966unet-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63unet",
|
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966unet-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63unet",
|
||||||
|
"type": "default",
|
||||||
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "unet",
|
"sourceHandle": "unet",
|
||||||
"targetHandle": "unet"
|
"targetHandle": "unet"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-85b77bb2-c67a-416a-b3e8-291abe746c44conditioning-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63negative_conditioning",
|
"id": "reactflow__edge-85b77bb2-c67a-416a-b3e8-291abe746c44conditioning-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63negative_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
"source": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
||||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "negative_conditioning"
|
"targetHandle": "negative_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c3fa6872-2599-4a82-a596-b3446a66cf8bconditioning-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63positive_conditioning",
|
"id": "reactflow__edge-c3fa6872-2599-4a82-a596-b3446a66cf8bconditioning-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63positive_conditioning",
|
||||||
|
"type": "default",
|
||||||
"source": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
"source": "c3fa6872-2599-4a82-a596-b3446a66cf8b",
|
||||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "conditioning",
|
"sourceHandle": "conditioning",
|
||||||
"targetHandle": "positive_conditioning"
|
"targetHandle": "positive_conditioning"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ea18915f-2c5b-4569-b725-8e9e9122e8d3noise-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63noise",
|
"id": "reactflow__edge-ea18915f-2c5b-4569-b725-8e9e9122e8d3noise-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63noise",
|
||||||
|
"type": "default",
|
||||||
"source": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
"source": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
||||||
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
"target": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "noise",
|
"sourceHandle": "noise",
|
||||||
"targetHandle": "noise"
|
"targetHandle": "noise"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-6fd74a17-6065-47a5-b48b-f4e2b8fa7953value-ea18915f-2c5b-4569-b725-8e9e9122e8d3seed",
|
"id": "reactflow__edge-6fd74a17-6065-47a5-b48b-f4e2b8fa7953value-ea18915f-2c5b-4569-b725-8e9e9122e8d3seed",
|
||||||
|
"type": "default",
|
||||||
"source": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
"source": "6fd74a17-6065-47a5-b48b-f4e2b8fa7953",
|
||||||
"target": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
"target": "ea18915f-2c5b-4569-b725-8e9e9122e8d3",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "value",
|
"sourceHandle": "value",
|
||||||
"targetHandle": "seed"
|
"targetHandle": "seed"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63latents-a9683c0a-6b1f-4a5e-8187-c57e764b3400latents",
|
"id": "reactflow__edge-ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63latents-a9683c0a-6b1f-4a5e-8187-c57e764b3400latents",
|
||||||
|
"type": "default",
|
||||||
"source": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
"source": "ad487d0c-dcbb-49c5-bb8e-b28d4cbc5a63",
|
||||||
"target": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
"target": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "latents",
|
"sourceHandle": "latents",
|
||||||
"targetHandle": "latents"
|
"targetHandle": "latents"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818vae-a9683c0a-6b1f-4a5e-8187-c57e764b3400vae",
|
"id": "reactflow__edge-24e9d7ed-4836-4ec4-8f9e-e747721f9818vae-a9683c0a-6b1f-4a5e-8187-c57e764b3400vae",
|
||||||
|
"type": "default",
|
||||||
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
"source": "24e9d7ed-4836-4ec4-8f9e-e747721f9818",
|
||||||
"target": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
"target": "a9683c0a-6b1f-4a5e-8187-c57e764b3400",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "vae",
|
"sourceHandle": "vae",
|
||||||
"targetHandle": "vae"
|
"targetHandle": "vae"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip-85b77bb2-c67a-416a-b3e8-291abe746c44clip",
|
"id": "reactflow__edge-c41e705b-f2e3-4d1a-83c4-e34bb9344966clip-85b77bb2-c67a-416a-b3e8-291abe746c44clip",
|
||||||
|
"type": "default",
|
||||||
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
"source": "c41e705b-f2e3-4d1a-83c4-e34bb9344966",
|
||||||
"target": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
"target": "85b77bb2-c67a-416a-b3e8-291abe746c44",
|
||||||
"type": "default",
|
|
||||||
"sourceHandle": "clip",
|
"sourceHandle": "clip",
|
||||||
"targetHandle": "clip"
|
"targetHandle": "clip"
|
||||||
}
|
}
|
||||||
|
@ -169,7 +169,7 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
|
|||||||
|
|
||||||
self._cursor.execute(count_query, count_params)
|
self._cursor.execute(count_query, count_params)
|
||||||
total = self._cursor.fetchone()[0]
|
total = self._cursor.fetchone()[0]
|
||||||
pages = int(total / per_page) + 1
|
pages = total // per_page + (total % per_page > 0)
|
||||||
|
|
||||||
return PaginatedResults(
|
return PaginatedResults(
|
||||||
items=workflows,
|
items=workflows,
|
||||||
|
@ -283,11 +283,16 @@ class ModelInstall(object):
|
|||||||
|
|
||||||
def _remove_installed(self, model_list: List[str]):
|
def _remove_installed(self, model_list: List[str]):
|
||||||
all_models = self.all_models()
|
all_models = self.all_models()
|
||||||
|
models_to_remove = []
|
||||||
|
|
||||||
for path in model_list:
|
for path in model_list:
|
||||||
key = self.reverse_paths.get(path)
|
key = self.reverse_paths.get(path)
|
||||||
if key and all_models[key].installed:
|
if key and all_models[key].installed:
|
||||||
logger.warning(f"{path} already installed. Skipping.")
|
models_to_remove.append(path)
|
||||||
model_list.remove(path)
|
|
||||||
|
for path in models_to_remove:
|
||||||
|
logger.warning(f"{path} already installed. Skipping")
|
||||||
|
model_list.remove(path)
|
||||||
|
|
||||||
def _add_required_models(self, model_list: List[str]):
|
def _add_required_models(self, model_list: List[str]):
|
||||||
additional_models = []
|
additional_models = []
|
||||||
|
31
invokeai/backend/model_management/detect_baked_in_vae.py
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
# Copyright (c) 2024 Lincoln Stein and the InvokeAI Development Team
|
||||||
|
"""
|
||||||
|
This module exports the function has_baked_in_sdxl_vae().
|
||||||
|
It returns True if an SDXL checkpoint model has the original SDXL 1.0 VAE,
|
||||||
|
which doesn't work properly in fp16 mode.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from safetensors.torch import load_file
|
||||||
|
|
||||||
|
SDXL_1_0_VAE_HASH = "bc40b16c3a0fa4625abdfc01c04ffc21bf3cefa6af6c7768ec61eb1f1ac0da51"
|
||||||
|
|
||||||
|
|
||||||
|
def has_baked_in_sdxl_vae(checkpoint_path: Path) -> bool:
|
||||||
|
"""Return true if the checkpoint contains a custom (non SDXL-1.0) VAE."""
|
||||||
|
hash = _vae_hash(checkpoint_path)
|
||||||
|
return hash != SDXL_1_0_VAE_HASH
|
||||||
|
|
||||||
|
|
||||||
|
def _vae_hash(checkpoint_path: Path) -> str:
|
||||||
|
checkpoint = load_file(checkpoint_path, device="cpu")
|
||||||
|
vae_keys = [x for x in checkpoint.keys() if x.startswith("first_stage_model.")]
|
||||||
|
hash = hashlib.new("sha256")
|
||||||
|
for key in vae_keys:
|
||||||
|
value = checkpoint[key]
|
||||||
|
hash.update(bytes(key, "UTF-8"))
|
||||||
|
hash.update(bytes(str(value), "UTF-8"))
|
||||||
|
|
||||||
|
return hash.hexdigest()
|
@ -13,6 +13,7 @@ from safetensors.torch import load_file
|
|||||||
from transformers import CLIPTextModel, CLIPTokenizer
|
from transformers import CLIPTextModel, CLIPTokenizer
|
||||||
|
|
||||||
from invokeai.app.shared.models import FreeUConfig
|
from invokeai.app.shared.models import FreeUConfig
|
||||||
|
from invokeai.backend.model_management.model_load_optimizations import skip_torch_weight_init
|
||||||
|
|
||||||
from .models.lora import LoRAModel
|
from .models.lora import LoRAModel
|
||||||
|
|
||||||
@ -211,8 +212,12 @@ class ModelPatcher:
|
|||||||
for i in range(ti_embedding.shape[0]):
|
for i in range(ti_embedding.shape[0]):
|
||||||
new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))
|
new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))
|
||||||
|
|
||||||
# modify text_encoder
|
# Modify text_encoder.
|
||||||
text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added, pad_to_multiple_of)
|
# resize_token_embeddings(...) constructs a new torch.nn.Embedding internally. Initializing the weights of
|
||||||
|
# this embedding is slow and unnecessary, so we wrap this step in skip_torch_weight_init() to save some
|
||||||
|
# time.
|
||||||
|
with skip_torch_weight_init():
|
||||||
|
text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added, pad_to_multiple_of)
|
||||||
model_embeddings = text_encoder.get_input_embeddings()
|
model_embeddings = text_encoder.get_input_embeddings()
|
||||||
|
|
||||||
for ti_name, ti in ti_list:
|
for ti_name, ti in ti_list:
|
||||||
|
@ -759,7 +759,7 @@ class ModelManager(object):
|
|||||||
model_type: ModelType,
|
model_type: ModelType,
|
||||||
new_name: Optional[str] = None,
|
new_name: Optional[str] = None,
|
||||||
new_base: Optional[BaseModelType] = None,
|
new_base: Optional[BaseModelType] = None,
|
||||||
):
|
) -> None:
|
||||||
"""
|
"""
|
||||||
Rename or rebase a model.
|
Rename or rebase a model.
|
||||||
"""
|
"""
|
||||||
@ -781,6 +781,9 @@ class ModelManager(object):
|
|||||||
|
|
||||||
# if this is a model file/directory that we manage ourselves, we need to move it
|
# if this is a model file/directory that we manage ourselves, we need to move it
|
||||||
if old_path.is_relative_to(self.app_config.models_path):
|
if old_path.is_relative_to(self.app_config.models_path):
|
||||||
|
# keep the suffix!
|
||||||
|
if old_path.is_file():
|
||||||
|
new_name = Path(new_name).with_suffix(old_path.suffix).as_posix()
|
||||||
new_path = self.resolve_model_path(
|
new_path = self.resolve_model_path(
|
||||||
Path(
|
Path(
|
||||||
BaseModelType(new_base).value,
|
BaseModelType(new_base).value,
|
||||||
|
@ -370,6 +370,8 @@ class LoRACheckpointProbe(CheckpointProbeBase):
|
|||||||
return BaseModelType.StableDiffusion1
|
return BaseModelType.StableDiffusion1
|
||||||
elif token_vector_length == 1024:
|
elif token_vector_length == 1024:
|
||||||
return BaseModelType.StableDiffusion2
|
return BaseModelType.StableDiffusion2
|
||||||
|
elif token_vector_length == 1280:
|
||||||
|
return BaseModelType.StableDiffusionXL # recognizes format at https://civitai.com/models/224641
|
||||||
elif token_vector_length == 2048:
|
elif token_vector_length == 2048:
|
||||||
return BaseModelType.StableDiffusionXL
|
return BaseModelType.StableDiffusionXL
|
||||||
else:
|
else:
|
||||||
|
@ -1,11 +1,16 @@
|
|||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
from typing import Literal, Optional
|
from typing import Literal, Optional
|
||||||
|
|
||||||
from omegaconf import OmegaConf
|
from omegaconf import OmegaConf
|
||||||
from pydantic import Field
|
from pydantic import Field
|
||||||
|
|
||||||
|
from invokeai.app.services.config import InvokeAIAppConfig
|
||||||
|
from invokeai.backend.model_management.detect_baked_in_vae import has_baked_in_sdxl_vae
|
||||||
|
from invokeai.backend.util.logging import InvokeAILogger
|
||||||
|
|
||||||
from .base import (
|
from .base import (
|
||||||
BaseModelType,
|
BaseModelType,
|
||||||
DiffusersModel,
|
DiffusersModel,
|
||||||
@ -116,14 +121,28 @@ class StableDiffusionXLModel(DiffusersModel):
|
|||||||
# The convert script adapted from the diffusers package uses
|
# The convert script adapted from the diffusers package uses
|
||||||
# strings for the base model type. To avoid making too many
|
# strings for the base model type. To avoid making too many
|
||||||
# source code changes, we simply translate here
|
# source code changes, we simply translate here
|
||||||
|
if Path(output_path).exists():
|
||||||
|
return output_path
|
||||||
|
|
||||||
if isinstance(config, cls.CheckpointConfig):
|
if isinstance(config, cls.CheckpointConfig):
|
||||||
from invokeai.backend.model_management.models.stable_diffusion import _convert_ckpt_and_cache
|
from invokeai.backend.model_management.models.stable_diffusion import _convert_ckpt_and_cache
|
||||||
|
|
||||||
|
# Hack in VAE-fp16 fix - If model sdxl-vae-fp16-fix is installed,
|
||||||
|
# then we bake it into the converted model unless there is already
|
||||||
|
# a nonstandard VAE installed.
|
||||||
|
kwargs = {}
|
||||||
|
app_config = InvokeAIAppConfig.get_config()
|
||||||
|
vae_path = app_config.models_path / "sdxl/vae/sdxl-vae-fp16-fix"
|
||||||
|
if vae_path.exists() and not has_baked_in_sdxl_vae(Path(model_path)):
|
||||||
|
InvokeAILogger.get_logger().warning("No baked-in VAE detected. Inserting sdxl-vae-fp16-fix.")
|
||||||
|
kwargs["vae_path"] = vae_path
|
||||||
|
|
||||||
return _convert_ckpt_and_cache(
|
return _convert_ckpt_and_cache(
|
||||||
version=base_model,
|
version=base_model,
|
||||||
model_config=config,
|
model_config=config,
|
||||||
output_path=output_path,
|
output_path=output_path,
|
||||||
use_safetensors=False, # corrupts sdxl models for some reason
|
use_safetensors=False, # corrupts sdxl models for some reason
|
||||||
|
**kwargs,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
return model_path
|
return model_path
|
||||||
|
@ -6,6 +6,7 @@ from .config import (
|
|||||||
InvalidModelConfigException,
|
InvalidModelConfigException,
|
||||||
ModelConfigFactory,
|
ModelConfigFactory,
|
||||||
ModelFormat,
|
ModelFormat,
|
||||||
|
ModelRepoVariant,
|
||||||
ModelType,
|
ModelType,
|
||||||
ModelVariantType,
|
ModelVariantType,
|
||||||
SchedulerPredictionType,
|
SchedulerPredictionType,
|
||||||
@ -15,15 +16,16 @@ from .probe import ModelProbe
|
|||||||
from .search import ModelSearch
|
from .search import ModelSearch
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
"ModelProbe",
|
"AnyModelConfig",
|
||||||
"ModelSearch",
|
"BaseModelType",
|
||||||
|
"ModelRepoVariant",
|
||||||
"InvalidModelConfigException",
|
"InvalidModelConfigException",
|
||||||
"ModelConfigFactory",
|
"ModelConfigFactory",
|
||||||
"BaseModelType",
|
|
||||||
"ModelType",
|
|
||||||
"SubModelType",
|
|
||||||
"ModelVariantType",
|
|
||||||
"ModelFormat",
|
"ModelFormat",
|
||||||
|
"ModelProbe",
|
||||||
|
"ModelSearch",
|
||||||
|
"ModelType",
|
||||||
|
"ModelVariantType",
|
||||||
"SchedulerPredictionType",
|
"SchedulerPredictionType",
|
||||||
"AnyModelConfig",
|
"SubModelType",
|
||||||
]
|
]
|
||||||
|
@ -99,6 +99,17 @@ class SchedulerPredictionType(str, Enum):
|
|||||||
Sample = "sample"
|
Sample = "sample"
|
||||||
|
|
||||||
|
|
||||||
|
class ModelRepoVariant(str, Enum):
|
||||||
|
"""Various hugging face variants on the diffusers format."""
|
||||||
|
|
||||||
|
DEFAULT = "default" # model files without "fp16" or other qualifier
|
||||||
|
FP16 = "fp16"
|
||||||
|
FP32 = "fp32"
|
||||||
|
ONNX = "onnx"
|
||||||
|
OPENVINO = "openvino"
|
||||||
|
FLAX = "flax"
|
||||||
|
|
||||||
|
|
||||||
class ModelConfigBase(BaseModel):
|
class ModelConfigBase(BaseModel):
|
||||||
"""Base class for model configuration information."""
|
"""Base class for model configuration information."""
|
||||||
|
|
||||||
|
50
invokeai/backend/model_manager/metadata/__init__.py
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
"""
|
||||||
|
Initialization file for invokeai.backend.model_manager.metadata
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
|
||||||
|
from invokeai.backend.model_manager.metadata import(
|
||||||
|
AnyModelRepoMetadata,
|
||||||
|
CommercialUsage,
|
||||||
|
LicenseRestrictions,
|
||||||
|
HuggingFaceMetadata,
|
||||||
|
CivitaiMetadata,
|
||||||
|
)
|
||||||
|
|
||||||
|
from invokeai.backend.model_manager.metadata.fetch import CivitaiMetadataFetch
|
||||||
|
|
||||||
|
data = CivitaiMetadataFetch().from_url("https://civitai.com/models/206883/split")
|
||||||
|
assert isinstance(data, CivitaiMetadata)
|
||||||
|
if data.allow_commercial_use:
|
||||||
|
print("Commercial use of this model is allowed")
|
||||||
|
"""
|
||||||
|
from .fetch import CivitaiMetadataFetch, HuggingFaceMetadataFetch
|
||||||
|
from .metadata_base import (
|
||||||
|
AnyModelRepoMetadata,
|
||||||
|
AnyModelRepoMetadataValidator,
|
||||||
|
BaseMetadata,
|
||||||
|
CivitaiMetadata,
|
||||||
|
CommercialUsage,
|
||||||
|
HuggingFaceMetadata,
|
||||||
|
LicenseRestrictions,
|
||||||
|
ModelMetadataWithFiles,
|
||||||
|
RemoteModelFile,
|
||||||
|
UnknownMetadataException,
|
||||||
|
)
|
||||||
|
from .metadata_store import ModelMetadataStore
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"AnyModelRepoMetadata",
|
||||||
|
"AnyModelRepoMetadataValidator",
|
||||||
|
"CivitaiMetadata",
|
||||||
|
"CivitaiMetadataFetch",
|
||||||
|
"CommercialUsage",
|
||||||
|
"HuggingFaceMetadata",
|
||||||
|
"HuggingFaceMetadataFetch",
|
||||||
|
"LicenseRestrictions",
|
||||||
|
"ModelMetadataStore",
|
||||||
|
"BaseMetadata",
|
||||||
|
"ModelMetadataWithFiles",
|
||||||
|
"RemoteModelFile",
|
||||||
|
"UnknownMetadataException",
|
||||||
|
]
|
21
invokeai/backend/model_manager/metadata/fetch/__init__.py
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
"""
|
||||||
|
Initialization file for invokeai.backend.model_manager.metadata.fetch
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
from invokeai.backend.model_manager.metadata.fetch import (
|
||||||
|
CivitaiMetadataFetch,
|
||||||
|
HuggingFaceMetadataFetch,
|
||||||
|
)
|
||||||
|
from invokeai.backend.model_manager.metadata import CivitaiMetadata
|
||||||
|
|
||||||
|
data = CivitaiMetadataFetch().from_url("https://civitai.com/models/206883/split")
|
||||||
|
assert isinstance(data, CivitaiMetadata)
|
||||||
|
if data.allow_commercial_use:
|
||||||
|
print("Commercial use of this model is allowed")
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .civitai import CivitaiMetadataFetch
|
||||||
|
from .fetch_base import ModelMetadataFetchBase
|
||||||
|
from .huggingface import HuggingFaceMetadataFetch
|
||||||
|
|
||||||
|
__all__ = ["ModelMetadataFetchBase", "CivitaiMetadataFetch", "HuggingFaceMetadataFetch"]
|
187
invokeai/backend/model_manager/metadata/fetch/civitai.py
Normal file
@ -0,0 +1,187 @@
|
|||||||
|
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module fetches model metadata objects from the Civitai model repository.
|
||||||
|
In addition to the `from_url()` and `from_id()` methods inherited from the
|
||||||
|
`ModelMetadataFetchBase` base class.
|
||||||
|
|
||||||
|
Civitai has two separate ID spaces: a model ID and a version ID. The
|
||||||
|
version ID corresponds to a specific model, and is the ID accepted by
|
||||||
|
`from_id()`. The model ID corresponds to a family of related models,
|
||||||
|
such as different training checkpoints or 16 vs 32-bit versions. The
|
||||||
|
`from_civitai_modelid()` method will accept a model ID and return the
|
||||||
|
metadata from the default version within this model set. The default
|
||||||
|
version is the same as what the user sees when they click on a model's
|
||||||
|
thumbnail.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
|
||||||
|
from invokeai.backend.model_manager.metadata.fetch import CivitaiMetadataFetch
|
||||||
|
|
||||||
|
fetcher = CivitaiMetadataFetch()
|
||||||
|
metadata = fetcher.from_url("https://civitai.com/models/206883/split")
|
||||||
|
print(metadata.trained_words)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, Optional
|
||||||
|
|
||||||
|
import requests
|
||||||
|
from pydantic.networks import AnyHttpUrl
|
||||||
|
from requests.sessions import Session
|
||||||
|
|
||||||
|
from ..metadata_base import (
|
||||||
|
AnyModelRepoMetadata,
|
||||||
|
CivitaiMetadata,
|
||||||
|
CommercialUsage,
|
||||||
|
LicenseRestrictions,
|
||||||
|
RemoteModelFile,
|
||||||
|
UnknownMetadataException,
|
||||||
|
)
|
||||||
|
from .fetch_base import ModelMetadataFetchBase
|
||||||
|
|
||||||
|
CIVITAI_MODEL_PAGE_RE = r"https?://civitai.com/models/(\d+)"
|
||||||
|
CIVITAI_VERSION_PAGE_RE = r"https?://civitai.com/models/(\d+)\?modelVersionId=(\d+)"
|
||||||
|
CIVITAI_DOWNLOAD_RE = r"https?://civitai.com/api/download/models/(\d+)"
|
||||||
|
|
||||||
|
CIVITAI_VERSION_ENDPOINT = "https://civitai.com/api/v1/model-versions/"
|
||||||
|
CIVITAI_MODEL_ENDPOINT = "https://civitai.com/api/v1/models/"
|
||||||
|
|
||||||
|
|
||||||
|
class CivitaiMetadataFetch(ModelMetadataFetchBase):
|
||||||
|
"""Fetch model metadata from Civitai."""
|
||||||
|
|
||||||
|
def __init__(self, session: Optional[Session] = None):
|
||||||
|
"""
|
||||||
|
Initialize the fetcher with an optional requests.sessions.Session object.
|
||||||
|
|
||||||
|
By providing a configurable Session object, we can support unit tests on
|
||||||
|
this module without an internet connection.
|
||||||
|
"""
|
||||||
|
self._requests = session or requests.Session()
|
||||||
|
|
||||||
|
def from_url(self, url: AnyHttpUrl) -> AnyModelRepoMetadata:
|
||||||
|
"""
|
||||||
|
Given a URL to a CivitAI model or version page, return a ModelMetadata object.
|
||||||
|
|
||||||
|
In the event that the URL points to a model page without the particular version
|
||||||
|
indicated, the default model version is returned. Otherwise, the requested version
|
||||||
|
is returned.
|
||||||
|
"""
|
||||||
|
if match := re.match(CIVITAI_VERSION_PAGE_RE, str(url), re.IGNORECASE):
|
||||||
|
model_id = match.group(1)
|
||||||
|
version_id = match.group(2)
|
||||||
|
return self.from_civitai_versionid(int(version_id), int(model_id))
|
||||||
|
elif match := re.match(CIVITAI_MODEL_PAGE_RE, str(url), re.IGNORECASE):
|
||||||
|
model_id = match.group(1)
|
||||||
|
return self.from_civitai_modelid(int(model_id))
|
||||||
|
elif match := re.match(CIVITAI_DOWNLOAD_RE, str(url), re.IGNORECASE):
|
||||||
|
version_id = match.group(1)
|
||||||
|
return self.from_civitai_versionid(int(version_id))
|
||||||
|
raise UnknownMetadataException("The url '{url}' does not match any known Civitai URL patterns")
|
||||||
|
|
||||||
|
def from_id(self, id: str) -> AnyModelRepoMetadata:
|
||||||
|
"""
|
||||||
|
Given a Civitai model version ID, return a ModelRepoMetadata object.
|
||||||
|
|
||||||
|
May raise an `UnknownMetadataException`.
|
||||||
|
"""
|
||||||
|
return self.from_civitai_versionid(int(id))
|
||||||
|
|
||||||
|
def from_civitai_modelid(self, model_id: int) -> CivitaiMetadata:
|
||||||
|
"""
|
||||||
|
Return metadata from the default version of the indicated model.
|
||||||
|
|
||||||
|
May raise an `UnknownMetadataException`.
|
||||||
|
"""
|
||||||
|
model_url = CIVITAI_MODEL_ENDPOINT + str(model_id)
|
||||||
|
model_json = self._requests.get(model_url).json()
|
||||||
|
return self._from_model_json(model_json)
|
||||||
|
|
||||||
|
def _from_model_json(self, model_json: Dict[str, Any], version_id: Optional[int] = None) -> CivitaiMetadata:
|
||||||
|
try:
|
||||||
|
version_id = version_id or model_json["modelVersions"][0]["id"]
|
||||||
|
except TypeError as excp:
|
||||||
|
raise UnknownMetadataException from excp
|
||||||
|
|
||||||
|
# loop till we find the section containing the version requested
|
||||||
|
version_sections = [x for x in model_json["modelVersions"] if x["id"] == version_id]
|
||||||
|
if not version_sections:
|
||||||
|
raise UnknownMetadataException(f"Version {version_id} not found in model metadata")
|
||||||
|
|
||||||
|
version_json = version_sections[0]
|
||||||
|
safe_thumbnails = [x["url"] for x in version_json["images"] if x["nsfw"] == "None"]
|
||||||
|
|
||||||
|
# Civitai has one "primary" file plus others such as VAEs. We only fetch the primary.
|
||||||
|
primary = [x for x in version_json["files"] if x.get("primary")]
|
||||||
|
assert len(primary) == 1
|
||||||
|
primary_file = primary[0]
|
||||||
|
|
||||||
|
url = primary_file["downloadUrl"]
|
||||||
|
if "?" not in url: # work around apparent bug in civitai api
|
||||||
|
metadata_string = ""
|
||||||
|
for key, value in primary_file["metadata"].items():
|
||||||
|
if not value:
|
||||||
|
continue
|
||||||
|
metadata_string += f"&{key}={value}"
|
||||||
|
url = url + f"?type={primary_file['type']}{metadata_string}"
|
||||||
|
model_files = [
|
||||||
|
RemoteModelFile(
|
||||||
|
url=url,
|
||||||
|
path=Path(primary_file["name"]),
|
||||||
|
size=int(primary_file["sizeKB"] * 1024),
|
||||||
|
sha256=primary_file["hashes"]["SHA256"],
|
||||||
|
)
|
||||||
|
]
|
||||||
|
return CivitaiMetadata(
|
||||||
|
id=model_json["id"],
|
||||||
|
name=version_json["name"],
|
||||||
|
version_id=version_json["id"],
|
||||||
|
version_name=version_json["name"],
|
||||||
|
created=datetime.fromisoformat(_fix_timezone(version_json["createdAt"])),
|
||||||
|
updated=datetime.fromisoformat(_fix_timezone(version_json["updatedAt"])),
|
||||||
|
published=datetime.fromisoformat(_fix_timezone(version_json["publishedAt"])),
|
||||||
|
base_model_trained_on=version_json["baseModel"], # note - need a dictionary to turn into a BaseModelType
|
||||||
|
files=model_files,
|
||||||
|
download_url=version_json["downloadUrl"],
|
||||||
|
thumbnail_url=safe_thumbnails[0] if safe_thumbnails else None,
|
||||||
|
author=model_json["creator"]["username"],
|
||||||
|
description=model_json["description"],
|
||||||
|
version_description=version_json["description"] or "",
|
||||||
|
tags=model_json["tags"],
|
||||||
|
trained_words=version_json["trainedWords"],
|
||||||
|
nsfw=model_json["nsfw"],
|
||||||
|
restrictions=LicenseRestrictions(
|
||||||
|
AllowNoCredit=model_json["allowNoCredit"],
|
||||||
|
AllowCommercialUse=CommercialUsage(model_json["allowCommercialUse"]),
|
||||||
|
AllowDerivatives=model_json["allowDerivatives"],
|
||||||
|
AllowDifferentLicense=model_json["allowDifferentLicense"],
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
def from_civitai_versionid(self, version_id: int, model_id: Optional[int] = None) -> CivitaiMetadata:
|
||||||
|
"""
|
||||||
|
Return a CivitaiMetadata object given a model version id.
|
||||||
|
|
||||||
|
May raise an `UnknownMetadataException`.
|
||||||
|
"""
|
||||||
|
if model_id is None:
|
||||||
|
version_url = CIVITAI_VERSION_ENDPOINT + str(version_id)
|
||||||
|
version = self._requests.get(version_url).json()
|
||||||
|
model_id = version["modelId"]
|
||||||
|
|
||||||
|
model_url = CIVITAI_MODEL_ENDPOINT + str(model_id)
|
||||||
|
model_json = self._requests.get(model_url).json()
|
||||||
|
return self._from_model_json(model_json, version_id)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_json(cls, json: str) -> CivitaiMetadata:
|
||||||
|
"""Given the JSON representation of the metadata, return the corresponding Pydantic object."""
|
||||||
|
metadata = CivitaiMetadata.model_validate_json(json)
|
||||||
|
return metadata
|
||||||
|
|
||||||
|
|
||||||
|
def _fix_timezone(date: str) -> str:
|
||||||
|
return re.sub(r"Z$", "+00:00", date)
|
61
invokeai/backend/model_manager/metadata/fetch/fetch_base.py
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module is the base class for subclasses that fetch metadata from model repositories
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
|
||||||
|
from invokeai.backend.model_manager.metadata.fetch import CivitAIMetadataFetch
|
||||||
|
|
||||||
|
fetcher = CivitaiMetadataFetch()
|
||||||
|
metadata = fetcher.from_url("https://civitai.com/models/206883/split")
|
||||||
|
print(metadata.trained_words)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from pydantic.networks import AnyHttpUrl
|
||||||
|
from requests.sessions import Session
|
||||||
|
|
||||||
|
from ..metadata_base import AnyModelRepoMetadata, AnyModelRepoMetadataValidator
|
||||||
|
|
||||||
|
|
||||||
|
class ModelMetadataFetchBase(ABC):
|
||||||
|
"""Fetch metadata from remote generative model repositories."""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def __init__(self, session: Optional[Session] = None):
|
||||||
|
"""
|
||||||
|
Initialize the fetcher with an optional requests.sessions.Session object.
|
||||||
|
|
||||||
|
By providing a configurable Session object, we can support unit tests on
|
||||||
|
this module without an internet connection.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def from_url(self, url: AnyHttpUrl) -> AnyModelRepoMetadata:
|
||||||
|
"""
|
||||||
|
Given a URL to a model repository, return a ModelMetadata object.
|
||||||
|
|
||||||
|
This method will raise a `UnknownMetadataException`
|
||||||
|
in the event that the requested model metadata is not found at the provided location.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def from_id(self, id: str) -> AnyModelRepoMetadata:
|
||||||
|
"""
|
||||||
|
Given an ID for a model, return a ModelMetadata object.
|
||||||
|
|
||||||
|
This method will raise a `UnknownMetadataException`
|
||||||
|
in the event that the requested model's metadata is not found at the provided id.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_json(cls, json: str) -> AnyModelRepoMetadata:
|
||||||
|
"""Given the JSON representation of the metadata, return the corresponding Pydantic object."""
|
||||||
|
metadata = AnyModelRepoMetadataValidator.validate_json(json)
|
||||||
|
return metadata
|
92
invokeai/backend/model_manager/metadata/fetch/huggingface.py
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module fetches model metadata objects from the HuggingFace model repository,
|
||||||
|
using either a `repo_id` or the model page URL.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
|
||||||
|
from invokeai.backend.model_manager.metadata.fetch import HuggingFaceMetadataFetch
|
||||||
|
|
||||||
|
fetcher = HuggingFaceMetadataFetch()
|
||||||
|
metadata = fetcher.from_url("https://huggingface.co/stabilityai/sdxl-turbo")
|
||||||
|
print(metadata.tags)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import requests
|
||||||
|
from huggingface_hub import HfApi, configure_http_backend, hf_hub_url
|
||||||
|
from huggingface_hub.utils._errors import RepositoryNotFoundError
|
||||||
|
from pydantic.networks import AnyHttpUrl
|
||||||
|
from requests.sessions import Session
|
||||||
|
|
||||||
|
from ..metadata_base import (
|
||||||
|
AnyModelRepoMetadata,
|
||||||
|
HuggingFaceMetadata,
|
||||||
|
RemoteModelFile,
|
||||||
|
UnknownMetadataException,
|
||||||
|
)
|
||||||
|
from .fetch_base import ModelMetadataFetchBase
|
||||||
|
|
||||||
|
HF_MODEL_RE = r"https?://huggingface.co/([\w\-.]+/[\w\-.]+)"
|
||||||
|
|
||||||
|
|
||||||
|
class HuggingFaceMetadataFetch(ModelMetadataFetchBase):
|
||||||
|
"""Fetch model metadata from HuggingFace."""
|
||||||
|
|
||||||
|
def __init__(self, session: Optional[Session] = None):
|
||||||
|
"""
|
||||||
|
Initialize the fetcher with an optional requests.sessions.Session object.
|
||||||
|
|
||||||
|
By providing a configurable Session object, we can support unit tests on
|
||||||
|
this module without an internet connection.
|
||||||
|
"""
|
||||||
|
self._requests = session or requests.Session()
|
||||||
|
configure_http_backend(backend_factory=lambda: self._requests)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_json(cls, json: str) -> HuggingFaceMetadata:
|
||||||
|
"""Given the JSON representation of the metadata, return the corresponding Pydantic object."""
|
||||||
|
metadata = HuggingFaceMetadata.model_validate_json(json)
|
||||||
|
return metadata
|
||||||
|
|
||||||
|
def from_id(self, id: str) -> AnyModelRepoMetadata:
|
||||||
|
"""Return a HuggingFaceMetadata object given the model's repo_id."""
|
||||||
|
try:
|
||||||
|
model_info = HfApi().model_info(repo_id=id, files_metadata=True)
|
||||||
|
except RepositoryNotFoundError as excp:
|
||||||
|
raise UnknownMetadataException(f"'{id}' not found. See trace for details.") from excp
|
||||||
|
|
||||||
|
_, name = id.split("/")
|
||||||
|
return HuggingFaceMetadata(
|
||||||
|
id=model_info.id,
|
||||||
|
author=model_info.author,
|
||||||
|
name=name,
|
||||||
|
last_modified=model_info.last_modified,
|
||||||
|
tag_dict=model_info.card_data.to_dict() if model_info.card_data else {},
|
||||||
|
tags=model_info.tags,
|
||||||
|
files=[
|
||||||
|
RemoteModelFile(
|
||||||
|
url=hf_hub_url(id, x.rfilename),
|
||||||
|
path=Path(name, x.rfilename),
|
||||||
|
size=x.size,
|
||||||
|
sha256=x.lfs.get("sha256") if x.lfs else None,
|
||||||
|
)
|
||||||
|
for x in model_info.siblings
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
def from_url(self, url: AnyHttpUrl) -> AnyModelRepoMetadata:
|
||||||
|
"""
|
||||||
|
Return a HuggingFaceMetadata object given the model's web page URL.
|
||||||
|
|
||||||
|
In the case of an invalid or missing URL, raises a ModelNotFound exception.
|
||||||
|
"""
|
||||||
|
if match := re.match(HF_MODEL_RE, str(url), re.IGNORECASE):
|
||||||
|
repo_id = match.group(1)
|
||||||
|
return self.from_id(repo_id)
|
||||||
|
else:
|
||||||
|
raise UnknownMetadataException(f"'{url}' does not look like a HuggingFace model page")
|
202
invokeai/backend/model_manager/metadata/metadata_base.py
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
|
||||||
|
|
||||||
|
"""This module defines core text-to-image model metadata fields.
|
||||||
|
|
||||||
|
Metadata comprises any descriptive information that is not essential
|
||||||
|
for getting the model to run. For example "author" is metadata, while
|
||||||
|
"type", "base" and "format" are not. The latter fields are part of the
|
||||||
|
model's config, as defined in invokeai.backend.model_manager.config.
|
||||||
|
|
||||||
|
Note that the "name" and "description" are also present in `config`
|
||||||
|
records. This is intentional. The config record fields are intended to
|
||||||
|
be editable by the user as a form of customization. The metadata
|
||||||
|
versions of these fields are intended to be kept in sync with the
|
||||||
|
remote repo.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from datetime import datetime
|
||||||
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, List, Literal, Optional, Set, Tuple, Union
|
||||||
|
|
||||||
|
from huggingface_hub import configure_http_backend, hf_hub_url
|
||||||
|
from pydantic import BaseModel, Field, TypeAdapter
|
||||||
|
from pydantic.networks import AnyHttpUrl
|
||||||
|
from requests.sessions import Session
|
||||||
|
from typing_extensions import Annotated
|
||||||
|
|
||||||
|
from invokeai.backend.model_manager import ModelRepoVariant
|
||||||
|
|
||||||
|
from ..util import select_hf_files
|
||||||
|
|
||||||
|
|
||||||
|
class UnknownMetadataException(Exception):
|
||||||
|
"""Raised when no metadata is available for a model."""
|
||||||
|
|
||||||
|
|
||||||
|
class CommercialUsage(str, Enum):
|
||||||
|
"""Type of commercial usage allowed."""
|
||||||
|
|
||||||
|
No = "None"
|
||||||
|
Image = "Image"
|
||||||
|
Rent = "Rent"
|
||||||
|
RentCivit = "RentCivit"
|
||||||
|
Sell = "Sell"
|
||||||
|
|
||||||
|
|
||||||
|
class LicenseRestrictions(BaseModel):
|
||||||
|
"""Broad categories of licensing restrictions."""
|
||||||
|
|
||||||
|
AllowNoCredit: bool = Field(
|
||||||
|
description="if true, model can be redistributed without crediting author", default=False
|
||||||
|
)
|
||||||
|
AllowDerivatives: bool = Field(description="if true, derivatives of this model can be redistributed", default=False)
|
||||||
|
AllowDifferentLicense: bool = Field(
|
||||||
|
description="if true, derivatives of this model be redistributed under a different license", default=False
|
||||||
|
)
|
||||||
|
AllowCommercialUse: CommercialUsage = Field(
|
||||||
|
description="Type of commercial use allowed or 'No' if no commercial use is allowed.", default_factory=set
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class RemoteModelFile(BaseModel):
|
||||||
|
"""Information about a downloadable file that forms part of a model."""
|
||||||
|
|
||||||
|
url: AnyHttpUrl = Field(description="The url to download this model file")
|
||||||
|
path: Path = Field(description="The path to the file, relative to the model root")
|
||||||
|
size: int = Field(description="The size of this file, in bytes")
|
||||||
|
sha256: Optional[str] = Field(description="SHA256 hash of this model (not always available)", default=None)
|
||||||
|
|
||||||
|
|
||||||
|
class ModelMetadataBase(BaseModel):
|
||||||
|
"""Base class for model metadata information."""
|
||||||
|
|
||||||
|
name: str = Field(description="model's name")
|
||||||
|
author: str = Field(description="model's author")
|
||||||
|
tags: Set[str] = Field(description="tags provided by model source")
|
||||||
|
|
||||||
|
|
||||||
|
class BaseMetadata(ModelMetadataBase):
|
||||||
|
"""Adds typing data for discriminated union."""
|
||||||
|
|
||||||
|
type: Literal["basemetadata"] = "basemetadata"
|
||||||
|
|
||||||
|
|
||||||
|
class ModelMetadataWithFiles(ModelMetadataBase):
|
||||||
|
"""Base class for metadata that contains a list of downloadable model file(s)."""
|
||||||
|
|
||||||
|
files: List[RemoteModelFile] = Field(description="model files and their sizes", default_factory=list)
|
||||||
|
|
||||||
|
def download_urls(
|
||||||
|
self,
|
||||||
|
variant: Optional[ModelRepoVariant] = None,
|
||||||
|
subfolder: Optional[Path] = None,
|
||||||
|
session: Optional[Session] = None,
|
||||||
|
) -> List[RemoteModelFile]:
|
||||||
|
"""
|
||||||
|
Return a list of URLs needed to download the model.
|
||||||
|
|
||||||
|
:param variant: Return files needed to reconstruct the indicated variant (e.g. ModelRepoVariant('fp16'))
|
||||||
|
:param subfolder: Return files in the designated subfolder only
|
||||||
|
:param session: A request.Session object for offline testing
|
||||||
|
|
||||||
|
Note that the "variant" and "subfolder" concepts currently only apply to HuggingFace.
|
||||||
|
However Civitai does have fields for the precision and format of its models, and may
|
||||||
|
provide variant selection criteria in the future.
|
||||||
|
"""
|
||||||
|
return self.files
|
||||||
|
|
||||||
|
|
||||||
|
class CivitaiMetadata(ModelMetadataWithFiles):
|
||||||
|
"""Extended metadata fields provided by Civitai."""
|
||||||
|
|
||||||
|
type: Literal["civitai"] = "civitai"
|
||||||
|
id: int = Field(description="Civitai version identifier")
|
||||||
|
version_name: str = Field(description="Version identifier, such as 'V2-alpha'")
|
||||||
|
version_id: int = Field(description="Civitai model version identifier")
|
||||||
|
created: datetime = Field(description="date the model was created")
|
||||||
|
updated: datetime = Field(description="date the model was last modified")
|
||||||
|
published: datetime = Field(description="date the model was published to Civitai")
|
||||||
|
description: str = Field(description="text description of model; may contain HTML")
|
||||||
|
version_description: str = Field(
|
||||||
|
description="text description of the model's reversion; usually change history; may contain HTML"
|
||||||
|
)
|
||||||
|
nsfw: bool = Field(description="whether the model tends to generate NSFW content", default=False)
|
||||||
|
restrictions: LicenseRestrictions = Field(description="license terms", default_factory=LicenseRestrictions)
|
||||||
|
trained_words: Set[str] = Field(description="words to trigger the model", default_factory=set)
|
||||||
|
download_url: AnyHttpUrl = Field(description="download URL for this model")
|
||||||
|
base_model_trained_on: str = Field(description="base model on which this model was trained (currently not an enum)")
|
||||||
|
thumbnail_url: Optional[AnyHttpUrl] = Field(description="a thumbnail image for this model", default=None)
|
||||||
|
weight_minmax: Tuple[float, float] = Field(
|
||||||
|
description="minimum and maximum slider values for a LoRA or other secondary model", default=(-1.0, +2.0)
|
||||||
|
) # note: For future use
|
||||||
|
|
||||||
|
@property
|
||||||
|
def credit_required(self) -> bool:
|
||||||
|
"""Return True if you must give credit for derivatives of this model and images generated from it."""
|
||||||
|
return not self.restrictions.AllowNoCredit
|
||||||
|
|
||||||
|
@property
|
||||||
|
def allow_commercial_use(self) -> bool:
|
||||||
|
"""Return True if commercial use is allowed."""
|
||||||
|
return self.restrictions.AllowCommercialUse != CommercialUsage("None")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def allow_derivatives(self) -> bool:
|
||||||
|
"""Return True if derivatives of this model can be redistributed."""
|
||||||
|
return self.restrictions.AllowDerivatives
|
||||||
|
|
||||||
|
@property
|
||||||
|
def allow_different_license(self) -> bool:
|
||||||
|
"""Return true if derivatives of this model can use a different license."""
|
||||||
|
return self.restrictions.AllowDifferentLicense
|
||||||
|
|
||||||
|
|
||||||
|
class HuggingFaceMetadata(ModelMetadataWithFiles):
|
||||||
|
"""Extended metadata fields provided by HuggingFace."""
|
||||||
|
|
||||||
|
type: Literal["huggingface"] = "huggingface"
|
||||||
|
id: str = Field(description="huggingface model id")
|
||||||
|
tag_dict: Dict[str, Any]
|
||||||
|
last_modified: datetime = Field(description="date of last commit to repo")
|
||||||
|
|
||||||
|
def download_urls(
|
||||||
|
self,
|
||||||
|
variant: Optional[ModelRepoVariant] = None,
|
||||||
|
subfolder: Optional[Path] = None,
|
||||||
|
session: Optional[Session] = None,
|
||||||
|
) -> List[RemoteModelFile]:
|
||||||
|
"""
|
||||||
|
Return list of downloadable files, filtering by variant and subfolder, if any.
|
||||||
|
|
||||||
|
:param variant: Return model files needed to reconstruct the indicated variant
|
||||||
|
:param subfolder: Return model files from the designated subfolder only
|
||||||
|
:param session: A request.Session object used for internet-free testing
|
||||||
|
|
||||||
|
Note that there is special variant-filtering behavior here:
|
||||||
|
When the fp16 variant is requested and not available, the
|
||||||
|
full-precision model is returned.
|
||||||
|
"""
|
||||||
|
session = session or Session()
|
||||||
|
configure_http_backend(backend_factory=lambda: session) # used in testing
|
||||||
|
|
||||||
|
paths = select_hf_files.filter_files(
|
||||||
|
[x.path for x in self.files], variant, subfolder
|
||||||
|
) # all files in the model
|
||||||
|
prefix = f"{subfolder}/" if subfolder else ""
|
||||||
|
|
||||||
|
# the next step reads model_index.json to determine which subdirectories belong
|
||||||
|
# to the model
|
||||||
|
if Path(f"{prefix}model_index.json") in paths:
|
||||||
|
url = hf_hub_url(self.id, filename="model_index.json", subfolder=subfolder)
|
||||||
|
resp = session.get(url)
|
||||||
|
resp.raise_for_status()
|
||||||
|
submodels = resp.json()
|
||||||
|
paths = [Path(subfolder or "", x) for x in paths if Path(x).parent.as_posix() in submodels]
|
||||||
|
paths.insert(0, Path(f"{prefix}model_index.json"))
|
||||||
|
|
||||||
|
return [x for x in self.files if x.path in paths]
|
||||||
|
|
||||||
|
|
||||||
|
AnyModelRepoMetadata = Annotated[Union[BaseMetadata, HuggingFaceMetadata, CivitaiMetadata], Field(discriminator="type")]
|
||||||
|
AnyModelRepoMetadataValidator = TypeAdapter(AnyModelRepoMetadata)
|
221
invokeai/backend/model_manager/metadata/metadata_store.py
Normal file
@ -0,0 +1,221 @@
|
|||||||
|
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
|
||||||
|
"""
|
||||||
|
SQL Storage for Model Metadata
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sqlite3
|
||||||
|
from typing import List, Optional, Set, Tuple
|
||||||
|
|
||||||
|
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||||
|
|
||||||
|
from .fetch import ModelMetadataFetchBase
|
||||||
|
from .metadata_base import AnyModelRepoMetadata, UnknownMetadataException
|
||||||
|
|
||||||
|
|
||||||
|
class ModelMetadataStore:
|
||||||
|
"""Store, search and fetch model metadata retrieved from remote repositories."""
|
||||||
|
|
||||||
|
def __init__(self, db: SqliteDatabase):
|
||||||
|
"""
|
||||||
|
Initialize a new object from preexisting sqlite3 connection and threading lock objects.
|
||||||
|
|
||||||
|
:param conn: sqlite3 connection object
|
||||||
|
:param lock: threading Lock object
|
||||||
|
"""
|
||||||
|
super().__init__()
|
||||||
|
self._db = db
|
||||||
|
self._cursor = self._db.conn.cursor()
|
||||||
|
|
||||||
|
def add_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> None:
|
||||||
|
"""
|
||||||
|
Add a block of repo metadata to a model record.
|
||||||
|
|
||||||
|
The model record config must already exist in the database with the
|
||||||
|
same key. Otherwise a FOREIGN KEY constraint exception will be raised.
|
||||||
|
|
||||||
|
:param model_key: Existing model key in the `model_config` table
|
||||||
|
:param metadata: ModelRepoMetadata object to store
|
||||||
|
"""
|
||||||
|
json_serialized = metadata.model_dump_json()
|
||||||
|
with self._db.lock:
|
||||||
|
try:
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
INSERT INTO model_metadata(
|
||||||
|
id,
|
||||||
|
metadata
|
||||||
|
)
|
||||||
|
VALUES (?,?);
|
||||||
|
""",
|
||||||
|
(
|
||||||
|
model_key,
|
||||||
|
json_serialized,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
self._update_tags(model_key, metadata.tags)
|
||||||
|
self._db.conn.commit()
|
||||||
|
except sqlite3.IntegrityError as excp: # FOREIGN KEY error: the key was not in model_config table
|
||||||
|
self._db.conn.rollback()
|
||||||
|
raise UnknownMetadataException from excp
|
||||||
|
except sqlite3.Error as excp:
|
||||||
|
self._db.conn.rollback()
|
||||||
|
raise excp
|
||||||
|
|
||||||
|
def get_metadata(self, model_key: str) -> AnyModelRepoMetadata:
|
||||||
|
"""Retrieve the ModelRepoMetadata corresponding to model key."""
|
||||||
|
with self._db.lock:
|
||||||
|
self._cursor.execute(
|
||||||
|
"""--sql
|
||||||
|
SELECT metadata FROM model_metadata
|
||||||
|
WHERE id=?;
|
||||||
|
""",
|
||||||
|
(model_key,),
|
||||||
|
)
|
||||||
|
rows = self._cursor.fetchone()
|
||||||
|
if not rows:
|
||||||
|
raise UnknownMetadataException("model metadata not found")
|
||||||
|
return ModelMetadataFetchBase.from_json(rows[0])
|
||||||
|
|
||||||
|
def list_all_metadata(self) -> List[Tuple[str, AnyModelRepoMetadata]]:  # key, metadata
    """
    Dump out all the metadata.

    :returns: list of (model_key, metadata) pairs for every row in model_metadata.
    """
    with self._db.lock:
        # The query takes no parameters; the redundant empty tuple argument
        # previously passed to execute() has been dropped.
        self._cursor.execute(
            """--sql
            SELECT id,metadata FROM model_metadata;
            """
        )
        rows = self._cursor.fetchall()
    return [(x[0], ModelMetadataFetchBase.from_json(x[1])) for x in rows]
def update_metadata(self, model_key: str, metadata: AnyModelRepoMetadata) -> AnyModelRepoMetadata:
    """
    Update metadata corresponding to the model with the indicated key.

    :param model_key: Existing model key in the `model_config` table
    :param metadata: ModelRepoMetadata object to update
    :returns: the stored metadata, re-read from the database after the update
    :raises UnknownMetadataException: if no row matches `model_key`
    """
    json_serialized = metadata.model_dump_json()  # turn it into a json string.
    with self._db.lock:
        try:
            self._cursor.execute(
                """--sql
                UPDATE model_metadata
                SET
                    metadata=?
                WHERE id=?;
                """,
                (json_serialized, model_key),
            )
            # rowcount of 0 means the UPDATE matched nothing: unknown key.
            if self._cursor.rowcount == 0:
                raise UnknownMetadataException("model metadata not found")
            self._update_tags(model_key, metadata.tags)
            self._db.conn.commit()
        except sqlite3.Error:
            self._db.conn.rollback()
            # Bare raise preserves the original traceback.
            raise

    # Re-read outside the try so the caller gets exactly what was persisted.
    return self.get_metadata(model_key)
def list_tags(self) -> Set[str]:
    """
    Return all tags in the tags table.

    :returns: the set of all tag_text values.
    """
    # Acquire the database lock for consistency with the other read
    # methods (get_metadata, list_all_metadata, search_by_tag); the
    # original accessed the shared cursor without locking.
    with self._db.lock:
        self._cursor.execute(
            """--sql
            select tag_text from tags;
            """
        )
        return {x[0] for x in self._cursor.fetchall()}
def search_by_tag(self, tags: Set[str]) -> Set[str]:
    """
    Return the keys of models containing all of the listed tags.

    :param tags: set of tag strings; a model must carry every one to match.
    :returns: matching model keys; empty set when `tags` is empty or nothing matches.
    """
    matches: Optional[Set[str]] = None
    with self._db.lock:
        # The original wrapped this loop in `try/except sqlite3.Error as e:
        # raise e`, a no-op that only obscured the traceback; removed.
        for tag in tags:
            self._cursor.execute(
                """--sql
                SELECT a.model_id FROM model_tags AS a,
                                       tags AS b
                WHERE a.tag_id=b.tag_id
                  AND b.tag_text=?;
                """,
                (tag,),
            )
            model_keys = {x[0] for x in self._cursor.fetchall()}
            # Intersect across tags so only models carrying *every* tag survive.
            matches = model_keys if matches is None else matches.intersection(model_keys)
    return matches if matches else set()
def search_by_author(self, author: str) -> Set[str]:
    """
    Return the keys of models authored by the indicated author.

    :param author: author string as stored in the `author` column.
    :returns: set of matching model keys.
    """
    # Lock added for consistency with the other query methods; the
    # original touched the shared cursor without holding self._db.lock.
    with self._db.lock:
        self._cursor.execute(
            """--sql
            SELECT id FROM model_metadata
            WHERE author=?;
            """,
            (author,),
        )
        return {x[0] for x in self._cursor.fetchall()}
def search_by_name(self, name: str) -> Set[str]:
    """
    Return the keys of models with the indicated name.

    Note that this is the name of the model given to it by
    the remote source. The user may have changed the local
    name. The local name will be located in the model config
    record object.

    :param name: remote-source model name to look up.
    :returns: set of matching model keys.
    """
    # Lock added for consistency with the other query methods; the
    # original touched the shared cursor without holding self._db.lock.
    with self._db.lock:
        self._cursor.execute(
            """--sql
            SELECT id FROM model_metadata
            WHERE name=?;
            """,
            (name,),
        )
        return {x[0] for x in self._cursor.fetchall()}
def _update_tags(self, model_key: str, tags: Set[str]) -> None:
    """Update tags for the model referenced by model_key."""
    # Drop every existing tag association for this model before re-adding.
    self._cursor.execute(
        """--sql
        DELETE FROM model_tags
        WHERE model_id=?;
        """,
        (model_key,),
    )

    for tag_text in tags:
        # Ensure a row for this tag exists (no-op when already present).
        self._cursor.execute(
            """--sql
            INSERT OR IGNORE INTO tags (
              tag_text
            )
            VALUES (?);
            """,
            (tag_text,),
        )
        # Look up the tag's id...
        self._cursor.execute(
            """--sql
            SELECT tag_id
            FROM tags
            WHERE tag_text = ?
            LIMIT 1;
            """,
            (tag_text,),
        )
        row = self._cursor.fetchone()
        # ...and link it to the model.
        self._cursor.execute(
            """--sql
            INSERT OR IGNORE INTO model_tags (
              model_id,
              tag_id
            )
            VALUES (?,?);
            """,
            (model_key, row[0]),
        )
@ -496,9 +496,9 @@ class PipelineFolderProbe(FolderProbeBase):
|
|||||||
def get_scheduler_prediction_type(self) -> SchedulerPredictionType:
|
def get_scheduler_prediction_type(self) -> SchedulerPredictionType:
|
||||||
with open(self.model_path / "scheduler" / "scheduler_config.json", "r") as file:
|
with open(self.model_path / "scheduler" / "scheduler_config.json", "r") as file:
|
||||||
scheduler_conf = json.load(file)
|
scheduler_conf = json.load(file)
|
||||||
if scheduler_conf["prediction_type"] == "v_prediction":
|
if scheduler_conf.get("prediction_type", "epsilon") == "v_prediction":
|
||||||
return SchedulerPredictionType.VPrediction
|
return SchedulerPredictionType.VPrediction
|
||||||
elif scheduler_conf["prediction_type"] == "epsilon":
|
elif scheduler_conf.get("prediction_type", "epsilon") == "epsilon":
|
||||||
return SchedulerPredictionType.Epsilon
|
return SchedulerPredictionType.Epsilon
|
||||||
else:
|
else:
|
||||||
raise InvalidModelConfigException("Unknown scheduler prediction type: {scheduler_conf['prediction_type']}")
|
raise InvalidModelConfigException("Unknown scheduler prediction type: {scheduler_conf['prediction_type']}")
|
||||||
|
132
invokeai/backend/model_manager/util/select_hf_files.py
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team
|
||||||
|
"""
|
||||||
|
Select the files from a HuggingFace repository needed for a particular model variant.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
```
|
||||||
|
from invokeai.backend.model_manager.util.select_hf_files import select_hf_model_files
|
||||||
|
from invokeai.backend.model_manager.metadata.fetch import HuggingFaceMetadataFetch
|
||||||
|
|
||||||
|
metadata = HuggingFaceMetadataFetch().from_url("https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0")
|
||||||
|
files_to_download = select_hf_model_files(metadata.files, variant='onnx')
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Set
|
||||||
|
|
||||||
|
from ..config import ModelRepoVariant
|
||||||
|
|
||||||
|
|
||||||
|
def filter_files(
    files: List[Path],
    variant: Optional[ModelRepoVariant] = None,
    subfolder: Optional[Path] = None,
) -> List[Path]:
    """
    Take a list of files in a HuggingFace repo root and return paths to files needed to load the model.

    :param files: List of files relative to the repo root.
    :param variant: Filter by files belonging to a particular variant, such as fp16.
    :param subfolder: Filter by the indicated subfolder.

    The file list can be obtained from the `files` field of HuggingFaceMetadata,
    as defined in `invokeai.backend.model_manager.metadata.metadata_base`.
    """
    variant = variant or ModelRepoVariant.DEFAULT

    # BRITTLENESS WARNING!!
    # Diffusers models always seem to have "model" in their name, and this regex
    # is applied to avoid downloading random checkpoints that might also be in
    # the repo. However there is no guarantee that a checkpoint doesn't contain
    # "model" in its name, and no guarantee that future diffusers models will
    # adhere to this naming convention, so this is an area of brittleness.
    weight_name_re = re.compile(r"model(\.[^.]+)?\.(safetensors|bin|onnx|xml|pth|pt|ckpt|msgpack)$")

    # Keep config/text files, known single-file weights, and diffusers weights;
    # discard images, docs, etc.
    candidates: List[Path] = []
    for candidate in files:
        name = candidate.name
        if name.endswith((".json", ".txt")):
            candidates.append(candidate)
        elif name.endswith(("learned_embeds.bin", "ip_adapter.bin", "lora_weights.safetensors")):
            candidates.append(candidate)
        elif weight_name_re.search(name):
            candidates.append(candidate)

    # Limit search to subfolder if requested.
    if subfolder:
        wanted_parent = Path(subfolder)
        candidates = [p for p in candidates if p.parent == wanted_parent]

    # _filter_by_variant uniquifies the paths and returns a set.
    return sorted(_filter_by_variant(candidates, variant))
def _filter_by_variant(files: List[Path], variant: ModelRepoVariant) -> Set[Path]:
    """
    Select the proper variant files from a list of HuggingFace repo_id paths.

    :param files: Candidate paths, already narrowed to model-relevant files.
    :param variant: The requested ModelRepoVariant.
    :returns: The subset of `files` appropriate for `variant`, as a set.
    """
    result: Set[Path] = set()
    # Maps "basename" (path with variant label and suffix stripped) to the
    # single weight file chosen so far for that basename.
    basenames: Dict[Path, Path] = {}
    for path in files:
        if path.suffix == ".onnx":
            if variant == ModelRepoVariant.ONNX:
                result.add(path)

        elif "openvino_model" in path.name:
            if variant == ModelRepoVariant.OPENVINO:
                result.add(path)

        elif "flax_model" in path.name:
            if variant == ModelRepoVariant.FLAX:
                result.add(path)

        # Config and text files are always kept.
        elif path.suffix in [".json", ".txt"]:
            result.add(path)

        elif path.suffix in [".bin", ".safetensors", ".pt", ".ckpt"] and variant in [
            ModelRepoVariant.FP16,
            ModelRepoVariant.FP32,
            ModelRepoVariant.DEFAULT,
        ]:
            parent = path.parent
            suffixes = path.suffixes
            if len(suffixes) == 2:
                # e.g. "model.fp16.safetensors" -> label ".fp16", suffix ".safetensors"
                variant_label, suffix = suffixes
                basename = parent / Path(path.stem).stem
            else:
                variant_label = ""
                # BUGFIX: use the final suffix. The original read suffixes[0],
                # which is the FIRST suffix and therefore wrong for names
                # containing extra dots (it broke the safetensors-preference
                # comparison below). For single-suffix names the two agree.
                suffix = path.suffix
                basename = parent / path.stem

            # Keep at most one weight file per basename, preferring
            # safetensors over other formats and exact variant-label matches
            # over unlabeled files.
            if previous := basenames.get(basename):
                if (
                    previous.suffix != ".safetensors" and suffix == ".safetensors"
                ):  # replace non-safetensors with safetensors when available
                    basenames[basename] = path
                if variant_label == f".{variant}":
                    basenames[basename] = path
                elif not variant_label and variant in [ModelRepoVariant.FP32, ModelRepoVariant.DEFAULT]:
                    basenames[basename] = path
            else:
                basenames[basename] = path

        else:
            continue

    for v in basenames.values():
        result.add(v)

    # If one of the architecture-related variants was specified and no files matched other than
    # config and text files then we return an empty list
    if (
        variant
        and variant in [ModelRepoVariant.ONNX, ModelRepoVariant.OPENVINO, ModelRepoVariant.FLAX]
        and not any(variant.value in x.name for x in result)
    ):
        return set()

    # Prune folders that contain just a `config.json`. This happens when
    # the requested variant (e.g. "onnx") is missing.
    directories: Dict[Path, int] = {}
    for x in result:
        # NOTE: Path.parent is never falsy (it is "." at the root), so the
        # original `if not x.parent: continue` guard was dead code; dropped.
        directories[x.parent] = directories.get(x.parent, 0) + 1

    return {x for x in result if directories[x.parent] > 1 or x.name != "config.json"}
@ -34,18 +34,23 @@ def choose_precision(device: torch.device) -> str:
|
|||||||
if device.type == "cuda":
|
if device.type == "cuda":
|
||||||
device_name = torch.cuda.get_device_name(device)
|
device_name = torch.cuda.get_device_name(device)
|
||||||
if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name):
|
if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name):
|
||||||
return "float16"
|
if config.precision == "bfloat16":
|
||||||
|
return "bfloat16"
|
||||||
|
else:
|
||||||
|
return "float16"
|
||||||
elif device.type == "mps":
|
elif device.type == "mps":
|
||||||
return "float16"
|
return "float16"
|
||||||
return "float32"
|
return "float32"
|
||||||
|
|
||||||
|
|
||||||
def torch_dtype(device: torch.device) -> torch.dtype:
|
def torch_dtype(device: torch.device) -> torch.dtype:
|
||||||
if config.full_precision:
|
precision = choose_precision(device)
|
||||||
return torch.float32
|
if precision == "float16":
|
||||||
if choose_precision(device) == "float16":
|
|
||||||
return torch.float16
|
return torch.float16
|
||||||
|
if precision == "bfloat16":
|
||||||
|
return torch.bfloat16
|
||||||
else:
|
else:
|
||||||
|
# "auto", "autocast", "float32"
|
||||||
return torch.float32
|
return torch.float32
|
||||||
|
|
||||||
|
|
||||||
|
@ -68,12 +68,9 @@ def welcome(latest_release: str, latest_prerelease: str):
|
|||||||
yield ""
|
yield ""
|
||||||
yield "This script will update InvokeAI to the latest release, or to the development version of your choice."
|
yield "This script will update InvokeAI to the latest release, or to the development version of your choice."
|
||||||
yield ""
|
yield ""
|
||||||
yield "When updating to an arbitrary tag or branch, be aware that the front end may be mismatched to the backend,"
|
|
||||||
yield "making the web frontend unusable. Please downgrade to the latest release if this happens."
|
|
||||||
yield ""
|
|
||||||
yield "[bold yellow]Options:"
|
yield "[bold yellow]Options:"
|
||||||
yield f"""[1] Update to the latest [bold]official release[/bold] ([italic]{latest_release}[/italic])
|
yield f"""[1] Update to the latest [bold]official release[/bold] ([italic]{latest_release}[/italic])
|
||||||
[2] Update to the latest [bold]pre-release[/bold] (may be buggy; caveat emptor!) ([italic]{latest_prerelease}[/italic])
|
[2] Update to the latest [bold]pre-release[/bold] (may be buggy, database backups are recommended before installation; caveat emptor!) ([italic]{latest_prerelease}[/italic])
|
||||||
[3] Manually enter the [bold]version[/bold] you wish to update to"""
|
[3] Manually enter the [bold]version[/bold] you wish to update to"""
|
||||||
|
|
||||||
console.rule()
|
console.rule()
|
||||||
|
@ -7,4 +7,4 @@ stats.html
|
|||||||
index.html
|
index.html
|
||||||
.yarn/
|
.yarn/
|
||||||
*.scss
|
*.scss
|
||||||
src/services/api/schema.d.ts
|
src/services/api/schema.ts
|
||||||
|
@ -28,12 +28,16 @@ module.exports = {
|
|||||||
'i18next',
|
'i18next',
|
||||||
'path',
|
'path',
|
||||||
'unused-imports',
|
'unused-imports',
|
||||||
|
'simple-import-sort',
|
||||||
|
'eslint-plugin-import',
|
||||||
|
// These rules are too strict for normal usage, but are useful for optimizing rerenders
|
||||||
|
// '@arthurgeron/react-usememo',
|
||||||
],
|
],
|
||||||
root: true,
|
root: true,
|
||||||
rules: {
|
rules: {
|
||||||
'path/no-relative-imports': ['error', { maxDepth: 0 }],
|
'path/no-relative-imports': ['error', { maxDepth: 0 }],
|
||||||
curly: 'error',
|
curly: 'error',
|
||||||
'i18next/no-literal-string': 2,
|
'i18next/no-literal-string': 'warn',
|
||||||
'react/jsx-no-bind': ['error', { allowBind: true }],
|
'react/jsx-no-bind': ['error', { allowBind: true }],
|
||||||
'react/jsx-curly-brace-presence': [
|
'react/jsx-curly-brace-presence': [
|
||||||
'error',
|
'error',
|
||||||
@ -43,6 +47,7 @@ module.exports = {
|
|||||||
'no-var': 'error',
|
'no-var': 'error',
|
||||||
'brace-style': 'error',
|
'brace-style': 'error',
|
||||||
'prefer-template': 'error',
|
'prefer-template': 'error',
|
||||||
|
'import/no-duplicates': 'error',
|
||||||
radix: 'error',
|
radix: 'error',
|
||||||
'space-before-blocks': 'error',
|
'space-before-blocks': 'error',
|
||||||
'import/prefer-default-export': 'off',
|
'import/prefer-default-export': 'off',
|
||||||
@ -57,6 +62,18 @@ module.exports = {
|
|||||||
argsIgnorePattern: '^_',
|
argsIgnorePattern: '^_',
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
|
// These rules are too strict for normal usage, but are useful for optimizing rerenders
|
||||||
|
// '@arthurgeron/react-usememo/require-usememo': [
|
||||||
|
// 'warn',
|
||||||
|
// {
|
||||||
|
// strict: false,
|
||||||
|
// checkHookReturnObject: false,
|
||||||
|
// fix: { addImports: true },
|
||||||
|
// checkHookCalls: false,
|
||||||
|
|
||||||
|
// },
|
||||||
|
// ],
|
||||||
|
// '@arthurgeron/react-usememo/require-memo': 'warn',
|
||||||
'@typescript-eslint/ban-ts-comment': 'warn',
|
'@typescript-eslint/ban-ts-comment': 'warn',
|
||||||
'@typescript-eslint/no-explicit-any': 'warn',
|
'@typescript-eslint/no-explicit-any': 'warn',
|
||||||
'@typescript-eslint/no-empty-interface': [
|
'@typescript-eslint/no-empty-interface': [
|
||||||
@ -65,7 +82,47 @@ module.exports = {
|
|||||||
allowSingleExtends: true,
|
allowSingleExtends: true,
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
|
'@typescript-eslint/consistent-type-imports': [
|
||||||
|
'error',
|
||||||
|
{
|
||||||
|
prefer: 'type-imports',
|
||||||
|
fixStyle: 'separate-type-imports',
|
||||||
|
disallowTypeAnnotations: true,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
'@typescript-eslint/no-import-type-side-effects': 'error',
|
||||||
|
'simple-import-sort/imports': 'error',
|
||||||
|
'simple-import-sort/exports': 'error',
|
||||||
|
// Prefer @invoke-ai/ui components over chakra
|
||||||
|
'no-restricted-imports': 'off',
|
||||||
|
'@typescript-eslint/no-restricted-imports': [
|
||||||
|
'warn',
|
||||||
|
{
|
||||||
|
paths: [
|
||||||
|
{
|
||||||
|
name: '@chakra-ui/react',
|
||||||
|
message: "Please import from '@invoke-ai/ui' instead.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: '@chakra-ui/layout',
|
||||||
|
message: "Please import from '@invoke-ai/ui' instead.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: '@chakra-ui/portal',
|
||||||
|
message: "Please import from '@invoke-ai/ui' instead.",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
],
|
||||||
},
|
},
|
||||||
|
overrides: [
|
||||||
|
{
|
||||||
|
files: ['*.stories.tsx'],
|
||||||
|
rules: {
|
||||||
|
'i18next/no-literal-string': 'off',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
],
|
||||||
settings: {
|
settings: {
|
||||||
react: {
|
react: {
|
||||||
version: 'detect',
|
version: 'detect',
|
||||||
|
1
invokeai/frontend/web/.gitignore
vendored
@ -8,6 +8,7 @@ pnpm-debug.log*
|
|||||||
lerna-debug.log*
|
lerna-debug.log*
|
||||||
|
|
||||||
node_modules
|
node_modules
|
||||||
|
.pnpm-store
|
||||||
# We want to distribute the repo
|
# We want to distribute the repo
|
||||||
dist
|
dist
|
||||||
dist/**
|
dist/**
|
||||||
|
@ -9,7 +9,8 @@ index.html
|
|||||||
.yarn/
|
.yarn/
|
||||||
.yalc/
|
.yalc/
|
||||||
*.scss
|
*.scss
|
||||||
src/services/api/schema.d.ts
|
src/services/api/schema.ts
|
||||||
static/
|
static/
|
||||||
src/theme/css/overlayscrollbars.css
|
src/theme/css/overlayscrollbars.css
|
||||||
|
src/theme_/css/overlayscrollbars.css
|
||||||
pnpm-lock.yaml
|
pnpm-lock.yaml
|
||||||
|
25
invokeai/frontend/web/.storybook/ReduxInit.tsx
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
import { PropsWithChildren, memo, useEffect } from 'react';
|
||||||
|
import { modelChanged } from '../src/features/parameters/store/generationSlice';
|
||||||
|
import { useAppDispatch } from '../src/app/store/storeHooks';
|
||||||
|
import { useGlobalModifiersInit } from '@invoke-ai/ui';
|
||||||
|
/**
|
||||||
|
* Initializes some state for storybook. Must be in a different component
|
||||||
|
* so that it is run inside the redux context.
|
||||||
|
*/
|
||||||
|
export const ReduxInit = memo((props: PropsWithChildren) => {
|
||||||
|
const dispatch = useAppDispatch();
|
||||||
|
useGlobalModifiersInit();
|
||||||
|
useEffect(() => {
|
||||||
|
dispatch(
|
||||||
|
modelChanged({
|
||||||
|
model_name: 'test_model',
|
||||||
|
base_model: 'sd-1',
|
||||||
|
model_type: 'main',
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
return props.children;
|
||||||
|
});
|
||||||
|
|
||||||
|
ReduxInit.displayName = 'ReduxInit';
|
@ -6,6 +6,7 @@ const config: StorybookConfig = {
|
|||||||
'@storybook/addon-links',
|
'@storybook/addon-links',
|
||||||
'@storybook/addon-essentials',
|
'@storybook/addon-essentials',
|
||||||
'@storybook/addon-interactions',
|
'@storybook/addon-interactions',
|
||||||
|
'@storybook/addon-storysource',
|
||||||
],
|
],
|
||||||
framework: {
|
framework: {
|
||||||
name: '@storybook/react-vite',
|
name: '@storybook/react-vite',
|
||||||
|
@ -1,16 +1,17 @@
|
|||||||
import { Preview } from '@storybook/react';
|
import { Preview } from '@storybook/react';
|
||||||
import { themes } from '@storybook/theming';
|
import { themes } from '@storybook/theming';
|
||||||
import i18n from 'i18next';
|
import i18n from 'i18next';
|
||||||
import React from 'react';
|
|
||||||
import { initReactI18next } from 'react-i18next';
|
import { initReactI18next } from 'react-i18next';
|
||||||
import { Provider } from 'react-redux';
|
import { Provider } from 'react-redux';
|
||||||
import GlobalHotkeys from '../src/app/components/GlobalHotkeys';
|
|
||||||
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
import ThemeLocaleProvider from '../src/app/components/ThemeLocaleProvider';
|
||||||
|
import { $baseUrl } from '../src/app/store/nanostores/baseUrl';
|
||||||
import { createStore } from '../src/app/store/store';
|
import { createStore } from '../src/app/store/store';
|
||||||
|
import { Container } from '@chakra-ui/react';
|
||||||
// TODO: Disabled for IDE performance issues with our translation JSON
|
// TODO: Disabled for IDE performance issues with our translation JSON
|
||||||
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
||||||
// @ts-ignore
|
// @ts-ignore
|
||||||
import translationEN from '../public/locales/en.json';
|
import translationEN from '../public/locales/en.json';
|
||||||
|
import { ReduxInit } from './ReduxInit';
|
||||||
|
|
||||||
i18n.use(initReactI18next).init({
|
i18n.use(initReactI18next).init({
|
||||||
lng: 'en',
|
lng: 'en',
|
||||||
@ -25,17 +26,21 @@ i18n.use(initReactI18next).init({
|
|||||||
});
|
});
|
||||||
|
|
||||||
const store = createStore(undefined, false);
|
const store = createStore(undefined, false);
|
||||||
|
$baseUrl.set('http://localhost:9090');
|
||||||
|
|
||||||
const preview: Preview = {
|
const preview: Preview = {
|
||||||
decorators: [
|
decorators: [
|
||||||
(Story) => (
|
(Story) => {
|
||||||
<Provider store={store}>
|
return (
|
||||||
<ThemeLocaleProvider>
|
<Provider store={store}>
|
||||||
<GlobalHotkeys />
|
<ThemeLocaleProvider>
|
||||||
<Story />
|
<ReduxInit>
|
||||||
</ThemeLocaleProvider>
|
<Story />
|
||||||
</Provider>
|
</ReduxInit>
|
||||||
),
|
</ThemeLocaleProvider>
|
||||||
|
</Provider>
|
||||||
|
);
|
||||||
|
},
|
||||||
],
|
],
|
||||||
parameters: {
|
parameters: {
|
||||||
docs: {
|
docs: {
|
||||||
|
15
invokeai/frontend/web/.unimportedrc.json
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"entry": ["src/main.tsx"],
|
||||||
|
"extensions": [".ts", ".tsx"],
|
||||||
|
"ignorePatterns": [
|
||||||
|
"**/node_modules/**",
|
||||||
|
"dist/**",
|
||||||
|
"public/**",
|
||||||
|
"**/*.stories.tsx",
|
||||||
|
"config/**"
|
||||||
|
],
|
||||||
|
"ignoreUnresolved": [],
|
||||||
|
"ignoreUnimported": ["src/i18.d.ts", "vite.config.ts", "src/vite-env.d.ts"],
|
||||||
|
"respectGitignore": true,
|
||||||
|
"ignoreUnused": []
|
||||||
|
}
|
@ -1,6 +1,6 @@
|
|||||||
import react from '@vitejs/plugin-react-swc';
|
import react from '@vitejs/plugin-react-swc';
|
||||||
import { visualizer } from 'rollup-plugin-visualizer';
|
import { visualizer } from 'rollup-plugin-visualizer';
|
||||||
import { PluginOption, UserConfig } from 'vite';
|
import type { PluginOption, UserConfig } from 'vite';
|
||||||
import eslint from 'vite-plugin-eslint';
|
import eslint from 'vite-plugin-eslint';
|
||||||
import tsconfigPaths from 'vite-tsconfig-paths';
|
import tsconfigPaths from 'vite-tsconfig-paths';
|
||||||
|
|
@ -1,5 +1,6 @@
|
|||||||
import { UserConfig } from 'vite';
|
import type { UserConfig } from 'vite';
|
||||||
import { commonPlugins } from './common';
|
|
||||||
|
import { commonPlugins } from './common.mjs';
|
||||||
|
|
||||||
export const appConfig: UserConfig = {
|
export const appConfig: UserConfig = {
|
||||||
base: './',
|
base: './',
|
@ -1,8 +1,9 @@
|
|||||||
import path from 'path';
|
import path from 'path';
|
||||||
import { UserConfig } from 'vite';
|
import type { UserConfig } from 'vite';
|
||||||
import dts from 'vite-plugin-dts';
|
|
||||||
import cssInjectedByJsPlugin from 'vite-plugin-css-injected-by-js';
|
import cssInjectedByJsPlugin from 'vite-plugin-css-injected-by-js';
|
||||||
import { commonPlugins } from './common';
|
import dts from 'vite-plugin-dts';
|
||||||
|
|
||||||
|
import { commonPlugins } from './common.mjs';
|
||||||
|
|
||||||
export const packageConfig: UserConfig = {
|
export const packageConfig: UserConfig = {
|
||||||
base: './',
|
base: './',
|