Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Compare commits: separate-g ... lstein/fea (450 commits)
SHA1 | Author | Date | |
---|---|---|---|
fc02da4627 | |||
ddf917f68c | |||
c90807ba33 | |||
842b57e57c | |||
f538ed54fb | |||
d0a936ebd4 | |||
27622dfd5e | |||
72b44f7ebc | |||
7726d312e1 | |||
61520dfb86 | |||
6e869e6038 | |||
9eacc0c189 | |||
23606d9e83 | |||
d4d0fea078 | |||
a5771f6120 | |||
35f847d5b7 | |||
3278497674 | |||
c9350f71be | |||
b00e27b022 | |||
a6283b9fb6 | |||
64fb15e117 | |||
7019d93ff0 | |||
7467768d48 | |||
e2d7b514e0 | |||
c36d12a50f | |||
c7f8fe4d5e | |||
ffb41c3616 | |||
611006b692 | |||
ca496f0380 | |||
01d8ab04a5 | |||
7a4122235f | |||
75f4e27522 | |||
8ae757334e | |||
2038064a34 | |||
689cb9d31d | |||
0cab1d1e04 | |||
9bd7dabed3 | |||
30283a4767 | |||
dacfe6853e | |||
2b093da4b0 | |||
368c1b709c | |||
2269253a6c | |||
3490aee247 | |||
f592ad3649 | |||
42a2bad936 | |||
ba2fd875ad | |||
9d30a063e7 | |||
dc9a9c0160 | |||
d45931a0af | |||
c1de129bbc | |||
bf852348aa | |||
fc63419c6e | |||
c356cabe97 | |||
97fe6e483d | |||
1069303309 | |||
eb607498bf | |||
bdb52cfcf7 | |||
3f6f8199f6 | |||
9a5575b46b | |||
dea9142cb8 | |||
02329df1df | |||
f5337c7ce2 | |||
b02f2da71d | |||
6c13fa13ea | |||
040ea8f41b | |||
13c72206d8 | |||
96ef7e3889 | |||
2eacbb4d9d | |||
0e51495071 | |||
b378cfcb46 | |||
6c558279dd | |||
bd3e8cbdfb | |||
aa689e5384 | |||
484488dee4 | |||
e40b715f39 | |||
e8f4012b56 | |||
bc12ca9220 | |||
5ceaeb234d | |||
429f87c60b | |||
ee3096f616 | |||
6af6673a4f | |||
b173e4c08d | |||
059f869737 | |||
813e679b77 | |||
857e9c9b5f | |||
5c1aa02e7b | |||
0e3fb4e97a | |||
6e882d3fd6 | |||
fabef8b45b | |||
97f16b2b7e | |||
609c2c0abf | |||
fe5fa7f8cc | |||
8b30cbe81e | |||
2af9286345 | |||
29b04b7e83 | |||
04aa97f0fd | |||
39fa8874fc | |||
4e245e9331 | |||
74a51571a0 | |||
c87497fd54 | |||
3f61c51c3a | |||
07c9c0b0ab | |||
2322d3cbbe | |||
419ce02aae | |||
629ccd059e | |||
2dfa51c2e5 | |||
edde0fe174 | |||
3e46f7a010 | |||
faa555df20 | |||
7a3e19227f | |||
e706afe8a6 | |||
acca197893 | |||
aa2c404cab | |||
300a4693ae | |||
820614e4d8 | |||
fe563f05fc | |||
d89e653588 | |||
4e9207a10b | |||
8c6c33a315 | |||
ed0f9f7d66 | |||
b25850a585 | |||
94257e35f5 | |||
19e16384f7 | |||
a3ed6e694c | |||
20d9d10798 | |||
77a70a8a9c | |||
06abea8db0 | |||
a28f0932e6 | |||
6968a068bb | |||
9d5b96c119 | |||
5daefccf77 | |||
1f3c35ee90 | |||
f78ed3a952 | |||
d38262a7ea | |||
5feb62d440 | |||
f8df293d2c | |||
9d045964d6 | |||
9fa9ebe386 | |||
1cb1b60b4c | |||
1d4517d00d | |||
982b513af3 | |||
f1450c2c24 | |||
5d16a40b95 | |||
d871fca643 | |||
8cd65755ef | |||
e76cc71e81 | |||
1ed1c1fb24 | |||
9063ea9173 | |||
4633242503 | |||
e8b030427d | |||
415a4baf78 | |||
09a8c0328a | |||
e32c609fec | |||
beffca6b49 | |||
a281671e6c | |||
5179587b5a | |||
cb180909f7 | |||
ce9aeeece3 | |||
5ecfa86cd0 | |||
d09f03ef25 | |||
3f8e2bfd18 | |||
60492500db | |||
f69938c6a8 | |||
5e39e46954 | |||
1079bf3ccf | |||
fbbf9c01b5 | |||
15cef98a8b | |||
5606f4d627 | |||
53c8f36029 | |||
b9884a6166 | |||
77b86e9ad5 | |||
a6181b5759 | |||
b4b0af7c60 | |||
3d1f3818cb | |||
deffeb9655 | |||
b8c46fb15b | |||
9539ecce79 | |||
7716a4a8c7 | |||
dedce2d896 | |||
e43bfa3d70 | |||
897fe497dc | |||
7b1f9409bc | |||
a72cea014c | |||
b4182b190f | |||
22ac204678 | |||
7ca447ded1 | |||
4df28f1de6 | |||
ebd0cb6113 | |||
fbe3afa5e1 | |||
3fb116155b | |||
7387b0bdc9 | |||
7ea9cac9a3 | |||
ea5bc94b9c | |||
a1743647b7 | |||
a6d64f69e1 | |||
e74e78894f | |||
71a1740740 | |||
b79f2f337e | |||
a0420d1442 | |||
a17021ba0c | |||
faa1ffb06f | |||
8c04eec210 | |||
330e1354b4 | |||
21621eebf0 | |||
c24f2046e7 | |||
297408d67e | |||
0131e7d928 | |||
06ff105a1f | |||
bb8e6bbee6 | |||
328dc99f3a | |||
ef55077e84 | |||
ba3d8af161 | |||
b07b7af710 | |||
19d66d5ec7 | |||
ed20255abf | |||
fed1f983db | |||
a386544a1d | |||
0851de9090 | |||
1bd8e33f8c | |||
e3f29ed320 | |||
3fd824306c | |||
2584a950aa | |||
1adaf63253 | |||
b9f1a4bd65 | |||
731942dbed | |||
4117cea5bf | |||
21617f3bc1 | |||
9fcd67b5c0 | |||
a4be935458 | |||
eb6e6548ed | |||
8287fcf097 | |||
dd475e28ed | |||
24e741e2d1 | |||
e0bf9ce5c6 | |||
c66e8b395e | |||
4c417adc82 | |||
437a413ca3 | |||
4492bedd19 | |||
db12ce95a8 | |||
ee3a1a95ef | |||
4bb5aba70e | |||
cd55c23713 | |||
1d2743af1b | |||
99d2099ccd | |||
b64a693f16 | |||
9d523a3094 | |||
af660163ca | |||
7e4b462fca | |||
4468dd6948 | |||
4f39e248dd | |||
44b3e5d43f | |||
8894a9e48a | |||
c73f58e486 | |||
614fece147 | |||
8ef8082d65 | |||
d93d4afbb7 | |||
01207a2fa5 | |||
d0800c4888 | |||
2a300ecada | |||
90340a39c7 | |||
ee77abb4fe | |||
004bca5c42 | |||
5ad048a161 | |||
6369ccd05e | |||
3a5314f1ca | |||
4c0896e436 | |||
f7cd3cf1f4 | |||
efea1a8a7d | |||
d0d695c020 | |||
2a648da557 | |||
54f1a1f952 | |||
8d2a4db902 | |||
7b393656de | |||
43948e0758 | |||
cc03fcbcb6 | |||
d1e445fa49 | |||
adba8489f2 | |||
d919022ba5 | |||
e076898798 | |||
9f19b766a4 | |||
4688623711 | |||
be951da99d | |||
9ee2e7ff25 | |||
149ff758b9 | |||
65d415d5aa | |||
c74c1927ec | |||
c454ccc65c | |||
46fd3465ce | |||
97afa6e2a6 | |||
96730107d1 | |||
6a9dede66f | |||
8c2ff794d5 | |||
145bb45858 | |||
9376b13435 | |||
eec82afd89 | |||
c47dbf7258 | |||
92b2e8186a | |||
70a88c6b99 | |||
56e7c04475 | |||
bd5b43c00d | |||
631e789195 | |||
133c90e116 | |||
4433b78e59 | |||
daeb766468 | |||
92b0d13d0e | |||
67d26cd633 | |||
9e28317a12 | |||
5b51ebf1c4 | |||
59228643a9 | |||
b24657df11 | |||
d4686b7f64 | |||
67163c2224 | |||
f01e81d382 | |||
a50e0a4802 | |||
df0a5aa92a | |||
0bd9a0a9ea | |||
4ae2cd242e | |||
0696094d95 | |||
fb1ae55010 | |||
deb1d4eb14 | |||
d156fd2093 | |||
c41e87160a | |||
eba1fc1355 | |||
96702c395e | |||
3361aec065 | |||
8ba4b2a150 | |||
df12e12e09 | |||
ee38fbe89c | |||
6e2cef1db5 | |||
b1f5ac4548 | |||
e52274ecac | |||
66f0ff5b13 | |||
cab5b64f0b | |||
a42812d78d | |||
281222df3c | |||
d5674150fa | |||
0cb2cf6644 | |||
da87266c9c | |||
35731a6f51 | |||
a3dfa161a8 | |||
42d606f07c | |||
9063b1ae61 | |||
6aae88bd88 | |||
57c1954da7 | |||
2410ed689a | |||
a10dccdd43 | |||
a3570901f7 | |||
fd457955bc | |||
1f69613f5d | |||
7a87ebb3b2 | |||
4ee4a801c6 | |||
53b7f6be37 | |||
dbd7c94e7c | |||
50bb9a6b41 | |||
13bb3c5e15 | |||
80c2a4b925 | |||
8ce485b036 | |||
6fc3e86061 | |||
33ded359e6 | |||
effbd8a1ba | |||
ddde355b09 | |||
fe2c6f621a | |||
d0fcdbe8a3 | |||
a28547b3dd | |||
c7b2bdb846 | |||
4a20377fef | |||
ed803640f7 | |||
576bb4a61d | |||
b6065d6328 | |||
04229f4a21 | |||
73a190fb6e | |||
952d97741e | |||
afd08c5f46 | |||
d1f859a446 | |||
5118160282 | |||
8e694992bb | |||
4077dfe0c3 | |||
fe8e391aad | |||
ac8f606d99 | |||
0aa2070ce0 | |||
ff66779aa3 | |||
2ca65ab9fa | |||
b34624a2a8 | |||
b8aa9752f1 | |||
1b5d8eb9e7 | |||
773182f425 | |||
6386109fc5 | |||
c008704bc8 | |||
a3a42d25d3 | |||
8959d1bf51 | |||
8fd9342712 | |||
f0b815aa9b | |||
3a5b0b819c | |||
bbcbcd9b63 | |||
fdecb886b2 | |||
2f0a653a7f | |||
b0add805c5 | |||
ed4e8624dd | |||
ad70cdfe87 | |||
549d461107 | |||
cab3748010 | |||
779b3e0e8e | |||
9b48029bc9 | |||
347f1fd0b7 | |||
4af5a09a68 | |||
8df02623f2 | |||
aa88fadc30 | |||
8411029d93 | |||
239b1e8cc7 | |||
8a68355926 | |||
86aef9f31d | |||
2f6964bfa5 | |||
c1cdfd132b | |||
f6bfe5e6f2 | |||
b5a8455b5f | |||
645ef081ea | |||
e68d7fa6d7 | |||
c5ab1c7ad6 | |||
5a561cab78 | |||
132790eebe | |||
c57f6ee885 | |||
d4a2ea68fc | |||
528ac5dd25 | |||
afd9ae7712 | |||
4eefed12f0 | |||
4301a3d6fd | |||
99c0662e3f | |||
cdc0d0c182 | |||
a00369a67a | |||
4f096ac3ba | |||
f5e3341465 | |||
474852ef7e | |||
b1d72d411e | |||
46614ee28f | |||
b019f9bb8b | |||
b857692073 | |||
90fb7a1a59 | |||
56fcf6af78 | |||
c4fe7e697b | |||
2fd483dfc8 | |||
b9a9507422 | |||
f2744fd7d1 | |||
fe6e879d38 | |||
d3ab08fe10 | |||
b0615bdfd4 | |||
bab20467fb | |||
e24624109e | |||
458e7185b8 | |||
a95128f5f2 | |||
46f32c5e3c |
67  .github/pull_request_template.md (vendored)
@ -1,66 +1,21 @@
|
||||
## What type of PR is this? (check all applicable)
|
||||
## Summary
|
||||
|
||||
- [ ] Refactor
|
||||
- [ ] Feature
|
||||
- [ ] Bug Fix
|
||||
- [ ] Optimization
|
||||
- [ ] Documentation Update
|
||||
- [ ] Community Node Submission
|
||||
<!--A description of the changes in this PR. Include the kind of change (fix, feature, docs, etc), the "why" and the "how". Screenshots or videos are useful for frontend changes.-->
|
||||
|
||||
## Related Issues / Discussions
|
||||
|
||||
## Have you discussed this change with the InvokeAI team?
|
||||
- [ ] Yes
|
||||
- [ ] No, because:
|
||||
<!--WHEN APPLICABLE: List any related issues or discussions on github or discord. If this PR closes an issue, please use the "Closes #1234" format, so that the issue will be automatically closed when the PR merges.-->
|
||||
|
||||
## QA Instructions
|
||||
|
||||
## Have you updated all relevant documentation?
|
||||
- [ ] Yes
|
||||
- [ ] No
|
||||
|
||||
|
||||
## Description
|
||||
|
||||
|
||||
## Related Tickets & Documents
|
||||
|
||||
<!--
|
||||
For pull requests that relate or close an issue, please include them
|
||||
below.
|
||||
|
||||
For example having the text: "closes #1234" would connect the current pull
|
||||
request to issue 1234. And when we merge the pull request, Github will
|
||||
automatically close the issue.
|
||||
-->
|
||||
|
||||
- Related Issue #
|
||||
- Closes #
|
||||
|
||||
## QA Instructions, Screenshots, Recordings
|
||||
|
||||
<!--
|
||||
Please provide steps on how to test changes, any hardware or
|
||||
software specifications as well as any other pertinent information.
|
||||
-->
|
||||
<!--WHEN APPLICABLE: Describe how we can test the changes in this PR.-->
|
||||
|
||||
## Merge Plan
|
||||
|
||||
<!--
|
||||
A merge plan describes how this PR should be handled after it is approved.
|
||||
<!--WHEN APPLICABLE: Large PRs, or PRs that touch sensitive things like DB schemas, may need some care when merging. For example, a careful rebase by the change author, timing to not interfere with a pending release, or a message to contributors on discord after merging.-->
|
||||
|
||||
Example merge plans:
|
||||
- "This PR can be merged when approved"
|
||||
- "This must be squash-merged when approved"
|
||||
- "DO NOT MERGE - I will rebase and tidy commits before merging"
|
||||
- "#dev-chat on discord needs to be advised of this change when it is merged"
|
||||
## Checklist
|
||||
|
||||
A merge plan is particularly important for large PRs or PRs that touch the
|
||||
database in any way.
|
||||
-->
|
||||
|
||||
## Added/updated tests?
|
||||
|
||||
- [ ] Yes
|
||||
- [ ] No : _please replace this line with details on why tests
|
||||
have not been included_
|
||||
|
||||
## [optional] Are there any post deployment tasks we need to perform?
|
||||
- [ ] _The PR has a short but descriptive title, suitable for a changelog_
|
||||
- [ ] _Tests added / updated (if applicable)_
|
||||
- [ ] _Documentation added / updated (if applicable)_
|
||||
|
28  .github/workflows/frontend-checks.yml (vendored)
@ -1,7 +1,7 @@
|
||||
# Runs frontend code quality checks.
|
||||
#
|
||||
# Checks for changes to frontend files before running the checks.
|
||||
# When manually triggered or when called from another workflow, always runs the checks.
|
||||
# If always_run is true, always runs the checks.
|
||||
|
||||
name: 'frontend checks'
|
||||
|
||||
@ -16,7 +16,19 @@ on:
|
||||
- 'synchronize'
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
workflow_call:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
@ -30,7 +42,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: check for changed frontend files
|
||||
if: ${{ github.event_name != 'workflow_dispatch' && github.event_name != 'workflow_call' }}
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
@ -39,30 +51,30 @@ jobs:
|
||||
- 'invokeai/frontend/web/**'
|
||||
|
||||
- name: install dependencies
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: ./.github/actions/install-frontend-deps
|
||||
|
||||
- name: tsc
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: 'pnpm lint:tsc'
|
||||
shell: bash
|
||||
|
||||
- name: dpdm
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: 'pnpm lint:dpdm'
|
||||
shell: bash
|
||||
|
||||
- name: eslint
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: 'pnpm lint:eslint'
|
||||
shell: bash
|
||||
|
||||
- name: prettier
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: 'pnpm lint:prettier'
|
||||
shell: bash
|
||||
|
||||
- name: knip
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: 'pnpm lint:knip'
|
||||
shell: bash
|
||||
|
20  .github/workflows/frontend-tests.yml (vendored)
@ -1,7 +1,7 @@
|
||||
# Runs frontend tests.
|
||||
#
|
||||
# Checks for changes to frontend files before running the tests.
|
||||
# When manually triggered or called from another workflow, always runs the tests.
|
||||
# If always_run is true, always runs the tests.
|
||||
|
||||
name: 'frontend tests'
|
||||
|
||||
@ -16,7 +16,19 @@ on:
|
||||
- 'synchronize'
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the tests'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
workflow_call:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the tests'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
@ -30,7 +42,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: check for changed frontend files
|
||||
if: ${{ github.event_name != 'workflow_dispatch' && github.event_name != 'workflow_call' }}
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
@ -39,10 +51,10 @@ jobs:
|
||||
- 'invokeai/frontend/web/**'
|
||||
|
||||
- name: install dependencies
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: ./.github/actions/install-frontend-deps
|
||||
|
||||
- name: vitest
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.frontend_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: 'pnpm test:no-watch'
|
||||
shell: bash
|
||||
|
24  .github/workflows/python-checks.yml (vendored)
@ -1,7 +1,7 @@
|
||||
# Runs python code quality checks.
|
||||
#
|
||||
# Checks for changes to python files before running the checks.
|
||||
# When manually triggered or called from another workflow, always runs the tests.
|
||||
# If always_run is true, always runs the checks.
|
||||
#
|
||||
# TODO: Add mypy or pyright to the checks.
|
||||
|
||||
@ -18,7 +18,19 @@ on:
|
||||
- 'synchronize'
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
workflow_call:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the checks'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
jobs:
|
||||
python-checks:
|
||||
@ -29,7 +41,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: check for changed python files
|
||||
if: ${{ github.event_name != 'workflow_dispatch' && github.event_name != 'workflow_call' }}
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
@ -41,7 +53,7 @@ jobs:
|
||||
- 'tests/**'
|
||||
|
||||
- name: setup python
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.10'
|
||||
@ -49,16 +61,16 @@ jobs:
|
||||
cache-dependency-path: pyproject.toml
|
||||
|
||||
- name: install ruff
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: pip install ruff
|
||||
shell: bash
|
||||
|
||||
- name: ruff check
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: ruff check --output-format=github .
|
||||
shell: bash
|
||||
|
||||
- name: ruff format
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: ruff format --check .
|
||||
shell: bash
|
||||
|
22  .github/workflows/python-tests.yml (vendored)
@ -1,7 +1,7 @@
|
||||
# Runs python tests on a matrix of python versions and platforms.
|
||||
#
|
||||
# Checks for changes to python files before running the tests.
|
||||
# When manually triggered or called from another workflow, always runs the tests.
|
||||
# If always_run is true, always runs the tests.
|
||||
|
||||
name: 'python tests'
|
||||
|
||||
@ -16,7 +16,19 @@ on:
|
||||
- 'synchronize'
|
||||
merge_group:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the tests'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
workflow_call:
|
||||
inputs:
|
||||
always_run:
|
||||
description: 'Always run the tests'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
@ -63,7 +75,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: check for changed python files
|
||||
if: ${{ github.event_name != 'workflow_dispatch' && github.event_name != 'workflow_call' }}
|
||||
if: ${{ inputs.always_run != true }}
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
@ -75,7 +87,7 @@ jobs:
|
||||
- 'tests/**'
|
||||
|
||||
- name: setup python
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
@ -83,12 +95,12 @@ jobs:
|
||||
cache-dependency-path: pyproject.toml
|
||||
|
||||
- name: install dependencies
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
env:
|
||||
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
|
||||
run: >
|
||||
pip3 install --editable=".[test]"
|
||||
|
||||
- name: run pytest
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'workflow_call' }}
|
||||
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
|
||||
run: pytest
|
||||
|
12  .github/workflows/release.yml (vendored)
@ -30,15 +30,23 @@ jobs:
|
||||
|
||||
frontend-checks:
|
||||
uses: ./.github/workflows/frontend-checks.yml
|
||||
with:
|
||||
always_run: true
|
||||
|
||||
frontend-tests:
|
||||
uses: ./.github/workflows/frontend-tests.yml
|
||||
with:
|
||||
always_run: true
|
||||
|
||||
python-checks:
|
||||
uses: ./.github/workflows/python-checks.yml
|
||||
with:
|
||||
always_run: true
|
||||
|
||||
python-tests:
|
||||
uses: ./.github/workflows/python-tests.yml
|
||||
with:
|
||||
always_run: true
|
||||
|
||||
build:
|
||||
uses: ./.github/workflows/build-installer.yml
|
||||
@ -58,6 +66,8 @@ jobs:
|
||||
environment:
|
||||
name: testpypi
|
||||
url: https://test.pypi.org/p/invokeai
|
||||
permissions:
|
||||
id-token: write
|
||||
steps:
|
||||
- name: download distribution from build job
|
||||
uses: actions/download-artifact@v4
|
||||
@ -85,6 +95,8 @@ jobs:
|
||||
environment:
|
||||
name: pypi
|
||||
url: https://pypi.org/p/invokeai
|
||||
permissions:
|
||||
id-token: write
|
||||
steps:
|
||||
- name: download distribution from build job
|
||||
uses: actions/download-artifact@v4
|
||||
|
29  Makefile
@ -6,16 +6,18 @@ default: help
|
||||
help:
|
||||
@echo Developer commands:
|
||||
@echo
|
||||
@echo "ruff Run ruff, fixing any safely-fixable errors and formatting"
|
||||
@echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting"
|
||||
@echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
|
||||
@echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports"
|
||||
@echo "test" Run the unit tests.
|
||||
@echo "frontend-install" Install the pnpm modules needed for the front end
|
||||
@echo "frontend-build Build the frontend in order to run on localhost:9090"
|
||||
@echo "frontend-dev Run the frontend in developer mode on localhost:5173"
|
||||
@echo "installer-zip Build the installer .zip file for the current version"
|
||||
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
|
||||
@echo "ruff Run ruff, fixing any safely-fixable errors and formatting"
|
||||
@echo "ruff-unsafe Run ruff, fixing all fixable errors and formatting"
|
||||
@echo "mypy Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
|
||||
@echo "mypy-all Run mypy ignoring the config in pyproject.tom but still ignoring missing imports"
|
||||
@echo "test Run the unit tests."
|
||||
@echo "update-config-docstring Update the app's config docstring so mkdocs can autogenerate it correctly."
|
||||
@echo "frontend-install Install the pnpm modules needed for the front end"
|
||||
@echo "frontend-build Build the frontend in order to run on localhost:9090"
|
||||
@echo "frontend-dev Run the frontend in developer mode on localhost:5173"
|
||||
@echo "frontend-typegen Generate types for the frontend from the OpenAPI schema"
|
||||
@echo "installer-zip Build the installer .zip file for the current version"
|
||||
@echo "tag-release Tag the GitHub repository with the current version (use at release time only!)"
|
||||
|
||||
# Runs ruff, fixing any safely-fixable errors and formatting
|
||||
ruff:
|
||||
@ -40,6 +42,10 @@ mypy-all:
|
||||
test:
|
||||
pytest ./tests
|
||||
|
||||
# Update config docstring
|
||||
update-config-docstring:
|
||||
python scripts/update_config_docstring.py
|
||||
|
||||
# Install the pnpm modules needed for the front end
|
||||
frontend-install:
|
||||
rm -rf invokeai/frontend/web/node_modules
|
||||
@ -53,6 +59,9 @@ frontend-build:
|
||||
frontend-dev:
|
||||
cd invokeai/frontend/web && pnpm dev
|
||||
|
||||
frontend-typegen:
|
||||
cd invokeai/frontend/web && python ../../../scripts/generate_openapi_schema.py | pnpm typegen
|
||||
|
||||
# Installer zip file
|
||||
installer-zip:
|
||||
cd installer && ./create_installer.sh
|
||||
|
@ -2,17 +2,25 @@
|
||||
## Any environment variables supported by InvokeAI can be specified here,
|
||||
## in addition to the examples below.
|
||||
|
||||
# HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where InvokeAI will store data.
|
||||
# Outputs will also be stored here by default.
|
||||
# If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
|
||||
#HOST_INVOKEAI_ROOT=../../invokeai-data
|
||||
|
||||
# INVOKEAI_ROOT is the path to the root of the InvokeAI repository within the container.
|
||||
## INVOKEAI_ROOT is the path *on the host system* where Invoke will store its data.
|
||||
## It is mounted into the container and allows both containerized and non-containerized usage of Invoke.
|
||||
# Usually this is the only variable you need to set. It can be relative or absolute.
|
||||
# INVOKEAI_ROOT=~/invokeai
|
||||
|
||||
# Get this value from your HuggingFace account settings page.
|
||||
# HUGGING_FACE_HUB_TOKEN=
|
||||
## HOST_INVOKEAI_ROOT and CONTAINER_INVOKEAI_ROOT can be used to control the on-host
|
||||
## and in-container paths separately, if needed.
|
||||
## HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where Invoke will store data.
|
||||
## If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
|
||||
## CONTAINER_INVOKEAI_ROOT is the path within the container where Invoke will expect to find the runtime directory.
|
||||
## It MUST be absolute. There is usually no need to change this.
|
||||
# HOST_INVOKEAI_ROOT=../../invokeai-data
|
||||
# CONTAINER_INVOKEAI_ROOT=/invokeai
|
||||
|
||||
## optional variables specific to the docker setup.
|
||||
## INVOKEAI_PORT is the port on which the InvokeAI web interface will be available
|
||||
# INVOKEAI_PORT=9090
|
||||
|
||||
## GPU_DRIVER can be set to either `nvidia` or `rocm` to enable GPU support in the container accordingly.
|
||||
# GPU_DRIVER=nvidia #| rocm
|
||||
|
||||
## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
|
||||
# CONTAINER_UID=1000
|
||||
|
@ -18,8 +18,6 @@ ENV INVOKEAI_SRC=/opt/invokeai
|
||||
ENV VIRTUAL_ENV=/opt/venv/invokeai
|
||||
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||
ARG TORCH_VERSION=2.1.2
|
||||
ARG TORCHVISION_VERSION=0.16.2
|
||||
ARG GPU_DRIVER=cuda
|
||||
ARG TARGETPLATFORM="linux/amd64"
|
||||
# unused but available
|
||||
@ -27,7 +25,12 @@ ARG BUILDPLATFORM
|
||||
|
||||
WORKDIR ${INVOKEAI_SRC}
|
||||
|
||||
# Install pytorch before all other pip packages
|
||||
COPY invokeai ./invokeai
|
||||
COPY pyproject.toml ./
|
||||
|
||||
# Editable mode helps use the same image for development:
|
||||
# the local working copy can be bind-mounted into the image
|
||||
# at path defined by ${INVOKEAI_SRC}
|
||||
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
|
||||
# x86_64/CUDA is default
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
@ -39,20 +42,10 @@ RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
else \
|
||||
extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
|
||||
fi &&\
|
||||
pip install $extra_index_url_arg \
|
||||
torch==$TORCH_VERSION \
|
||||
torchvision==$TORCHVISION_VERSION
|
||||
|
||||
# Install the local package.
|
||||
# Editable mode helps use the same image for development:
|
||||
# the local working copy can be bind-mounted into the image
|
||||
# at path defined by ${INVOKEAI_SRC}
|
||||
COPY invokeai ./invokeai
|
||||
COPY pyproject.toml ./
|
||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
||||
# xformers + triton fails to install on arm64
|
||||
if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
|
||||
pip install -e ".[xformers]"; \
|
||||
pip install $extra_index_url_arg -e ".[xformers]"; \
|
||||
else \
|
||||
pip install $extra_index_url_arg -e "."; \
|
||||
fi
|
||||
@ -101,6 +94,8 @@ RUN apt update && apt install -y --no-install-recommends \
|
||||
ENV INVOKEAI_SRC=/opt/invokeai
|
||||
ENV VIRTUAL_ENV=/opt/venv/invokeai
|
||||
ENV INVOKEAI_ROOT=/invokeai
|
||||
ENV INVOKEAI_HOST=0.0.0.0
|
||||
ENV INVOKEAI_PORT=9090
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
|
||||
ENV CONTAINER_UID=${CONTAINER_UID:-1000}
|
||||
ENV CONTAINER_GID=${CONTAINER_GID:-1000}
|
||||
@ -125,4 +120,4 @@ RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${IN
|
||||
|
||||
COPY docker/docker-entrypoint.sh ./
|
||||
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
|
||||
CMD ["invokeai-web", "--host", "0.0.0.0"]
|
||||
CMD ["invokeai-web"]
|
||||
|
@ -8,35 +8,28 @@ x-invokeai: &invokeai
|
||||
context: ..
|
||||
dockerfile: docker/Dockerfile
|
||||
|
||||
# variables without a default will automatically inherit from the host environment
|
||||
environment:
|
||||
- INVOKEAI_ROOT
|
||||
- HF_HOME
|
||||
|
||||
# Create a .env file in the same directory as this docker-compose.yml file
|
||||
# and populate it with environment variables. See .env.sample
|
||||
env_file:
|
||||
- .env
|
||||
|
||||
# variables without a default will automatically inherit from the host environment
|
||||
environment:
|
||||
# if set, CONTAINER_INVOKEAI_ROOT will override the Invoke runtime directory location *inside* the container
|
||||
- INVOKEAI_ROOT=${CONTAINER_INVOKEAI_ROOT:-/invokeai}
|
||||
- HF_HOME
|
||||
ports:
|
||||
- "${INVOKEAI_PORT:-9090}:9090"
|
||||
- "${INVOKEAI_PORT:-9090}:${INVOKEAI_PORT:-9090}"
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
|
||||
target: ${INVOKEAI_ROOT:-/invokeai}
|
||||
target: ${CONTAINER_INVOKEAI_ROOT:-/invokeai}
|
||||
bind:
|
||||
create_host_path: true
|
||||
- ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
|
||||
# - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
|
||||
# - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}
|
||||
tty: true
|
||||
stdin_open: true
|
||||
|
||||
# # Example of running alternative commands/scripts in the container
|
||||
# command:
|
||||
# - bash
|
||||
# - -c
|
||||
# - |
|
||||
# invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
|
||||
# invokeai-nodes-web --host 0.0.0.0
|
||||
|
||||
services:
|
||||
invokeai-nvidia:
|
||||
|
@ -9,10 +9,6 @@ set -e -o pipefail
|
||||
### Set INVOKEAI_ROOT pointing to a valid runtime directory
|
||||
# Otherwise configure the runtime dir first.
|
||||
|
||||
### Configure the InvokeAI runtime directory (done by default):
|
||||
# docker run --rm -it <this image> --configure
|
||||
# or skip with --no-configure
|
||||
|
||||
### Set the CONTAINER_UID envvar to match your user.
|
||||
# Ensures files created in the container are owned by you:
|
||||
# docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
|
||||
@ -22,27 +18,6 @@ USER_ID=${CONTAINER_UID:-1000}
|
||||
USER=ubuntu
|
||||
usermod -u ${USER_ID} ${USER} 1>/dev/null
|
||||
|
||||
configure() {
|
||||
# Configure the runtime directory
|
||||
if [[ -f ${INVOKEAI_ROOT}/invokeai.yaml ]]; then
|
||||
echo "${INVOKEAI_ROOT}/invokeai.yaml exists. InvokeAI is already configured."
|
||||
echo "To reconfigure InvokeAI, delete the above file."
|
||||
echo "======================================================================"
|
||||
else
|
||||
mkdir -p "${INVOKEAI_ROOT}"
|
||||
chown --recursive ${USER} "${INVOKEAI_ROOT}"
|
||||
gosu ${USER} invokeai-configure --yes --default_only
|
||||
fi
|
||||
}
|
||||
|
||||
## Skip attempting to configure.
|
||||
## Must be passed first, before any other args.
|
||||
if [[ $1 != "--no-configure" ]]; then
|
||||
configure
|
||||
else
|
||||
shift
|
||||
fi
|
||||
|
||||
### Set the $PUBLIC_KEY env var to enable SSH access.
|
||||
# We do not install openssh-server in the image by default to avoid bloat.
|
||||
# but it is useful to have the full SSH server e.g. on Runpod.
|
||||
@ -58,7 +33,8 @@ if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
|
||||
service ssh start
|
||||
fi
|
||||
|
||||
|
||||
mkdir -p "${INVOKEAI_ROOT}"
|
||||
chown --recursive ${USER} "${INVOKEAI_ROOT}"
|
||||
cd "${INVOKEAI_ROOT}"
|
||||
|
||||
# Run the CMD as the Container User (not root).
|
||||
|
@ -16,11 +16,6 @@ model. These are the:
|
||||
information. It is also responsible for managing the InvokeAI
|
||||
`models` directory and its contents.
|
||||
|
||||
* _ModelMetadataStore_ and _ModelMetaDataFetch_ Backend modules that
|
||||
are able to retrieve metadata from online model repositories,
|
||||
transform them into Pydantic models, and cache them to the InvokeAI
|
||||
SQL database.
|
||||
|
||||
* _DownloadQueueServiceBase_
|
||||
A multithreaded downloader responsible
|
||||
for downloading models from a remote source to disk. The download
|
||||
@ -383,16 +378,13 @@ functionality:
|
||||
* Downloading a model from an arbitrary URL and installing it in
|
||||
`models_dir`.
|
||||
|
||||
* Special handling for Civitai model URLs which allow the user to
|
||||
paste in a model page's URL or download link
|
||||
|
||||
* Special handling for HuggingFace repo_ids to recursively download
|
||||
the contents of the repository, paying attention to alternative
|
||||
variants such as fp16.
|
||||
|
||||
* Saving tags and other metadata about the model into the invokeai database
|
||||
when fetching from a repo that provides that type of information,
|
||||
(currently only Civitai and HuggingFace).
|
||||
(currently only HuggingFace).
|
||||
|
||||
### Initializing the installer
|
||||
|
||||
@ -436,7 +428,6 @@ required parameters:
|
||||
| `app_config` | InvokeAIAppConfig | InvokeAI app configuration object |
|
||||
| `record_store` | ModelRecordServiceBase | Config record storage database |
|
||||
| `download_queue` | DownloadQueueServiceBase | Download queue object |
|
||||
| `metadata_store` | Optional[ModelMetadataStore] | Metadata storage object |
|
||||
|`session` | Optional[requests.Session] | Swap in a different Session object (usually for debugging) |
|
||||
|
||||
Once initialized, the installer will provide the following methods:
|
||||
@ -580,33 +571,7 @@ The `AnyHttpUrl` class can be imported from `pydantic.networks`.
|
||||
|
||||
Ordinarily, no metadata is retrieved from these sources. However,
|
||||
there is special-case code in the installer that looks for HuggingFace
|
||||
and Civitai URLs and fetches the corresponding model metadata from
|
||||
the corresponding repo.
|
||||
|
||||
#### CivitaiModelSource
|
||||
|
||||
This is used for a model that is hosted by the Civitai web site.
|
||||
|
||||
| **Argument** | **Type** | **Default** | **Description** |
|
||||
|------------------|------------------------------|-------------|-------------------------------------------|
|
||||
| `version_id` | int | None | The ID of the particular version of the desired model. |
|
||||
| `access_token` | str | None | An access token needed to gain access to a subscriber's-only model. |
|
||||
|
||||
Civitai has two model IDs, both of which are integers. The `model_id`
|
||||
corresponds to a collection of model versions that may differ in
|
||||
arbitrary ways, such as derivation from different checkpoint training
|
||||
steps, SFW vs NSFW generation, pruned vs non-pruned, etc. The
|
||||
`version_id` points to a specific version. Please use the latter.
|
||||
|
||||
Some Civitai models require an access token to download. These can be
|
||||
generated from the Civitai profile page of a logged-in
|
||||
account. Somewhat annoyingly, if you fail to provide the access token
|
||||
when downloading a model that needs it, Civitai generates a redirect
|
||||
to a login page rather than a 403 Forbidden error. The installer
|
||||
attempts to catch this event and issue an informative error
|
||||
message. Otherwise you will get an "unrecognized model suffix" error
|
||||
when the model prober tries to identify the type of the HTML login
|
||||
page.
|
||||
and fetches the corresponding model metadata from the corresponding repo.
|
||||
|
||||
#### HFModelSource
|
||||
|
||||
@ -1253,9 +1218,9 @@ queue and have not yet reached a terminal state.
|
||||
|
||||
The modules found under `invokeai.backend.model_manager.metadata`
|
||||
provide a straightforward API for fetching model metadata from online
|
||||
repositories. Currently two repositories are supported: HuggingFace
|
||||
and Civitai. However, the modules are easily extended for additional
|
||||
repos, provided that they have defined APIs for metadata access.
|
||||
repositories. Currently only HuggingFace is supported. However, the
|
||||
modules are easily extended for additional repos, provided that they
|
||||
have defined APIs for metadata access.
|
||||
|
||||
Metadata comprises any descriptive information that is not essential
|
||||
for getting the model to run. For example "author" is metadata, while
|
||||
@ -1267,37 +1232,16 @@ model's config, as defined in `invokeai.backend.model_manager.config`.
|
||||
```
|
||||
from invokeai.backend.model_manager.metadata import (
|
||||
AnyModelRepoMetadata,
|
||||
CivitaiMetadataFetch,
|
||||
CivitaiMetadata
|
||||
ModelMetadataStore,
|
||||
)
|
||||
# to access the initialized sql database
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
|
||||
civitai = CivitaiMetadataFetch()
|
||||
hf = HuggingFaceMetadataFetch()
|
||||
|
||||
# fetch the metadata
|
||||
model_metadata = civitai.from_url("https://civitai.com/models/215796")
|
||||
model_metadata = hf.from_id("<repo_id>")
|
||||
|
||||
# get some common metadata fields
|
||||
author = model_metadata.author
|
||||
tags = model_metadata.tags
|
||||
|
||||
# get some Civitai-specific fields
|
||||
assert isinstance(model_metadata, CivitaiMetadata)
|
||||
|
||||
trained_words = model_metadata.trained_words
|
||||
base_model = model_metadata.base_model_trained_on
|
||||
thumbnail = model_metadata.thumbnail_url
|
||||
|
||||
# cache the metadata to the database using the key corresponding to
|
||||
# an existing model config record in the `model_config` table
|
||||
sql_cache = ModelMetadataStore(ApiDependencies.invoker.services.db)
|
||||
sql_cache.add_metadata('fb237ace520b6716adc98bcb16e8462c', model_metadata)
|
||||
|
||||
# now we can search the database by tag, author or model name
|
||||
# matches will contain a list of model keys that match the search
|
||||
matches = sql_cache.search_by_tag({"tool", "turbo"})
|
||||
assert isinstance(model_metadata, HuggingFaceMetadata)
|
||||
```
|
||||
|
||||
### Structure of the Metadata objects
|
||||
@ -1334,52 +1278,14 @@ This descends from `ModelMetadataBase` and adds the following fields:
|
||||
| `last_modified`| datetime | Date of last commit of this model to the repo |
|
||||
| `files` | List[Path] | List of the files in the model repo |
|
||||
|
||||
#### `CivitaiMetadata`
|
||||
|
||||
This descends from `ModelMetadataBase` and adds the following fields:
|
||||
|
||||
| **Field Name** | **Type** | **Description** |
|
||||
|----------------|-----------------|------------------|
|
||||
| `type` | Literal["civitai"] | Used for the discriminated union of metadata classes|
|
||||
| `id` | int | Civitai model id |
|
||||
| `version_name` | str | Name of this version of the model (distinct from model name) |
|
||||
| `version_id` | int | Civitai model version id (distinct from model id) |
|
||||
| `created` | datetime | Date this version of the model was created |
|
||||
| `updated` | datetime | Date this version of the model was last updated |
|
||||
| `published` | datetime | Date this version of the model was published to Civitai |
|
||||
| `description` | str | Model description. Quite verbose and contains HTML tags |
|
||||
| `version_description` | str | Model version description, usually describes changes to the model |
|
||||
| `nsfw` | bool | Whether the model tends to generate NSFW content |
|
||||
| `restrictions` | LicenseRestrictions | An object that describes what is and isn't allowed with this model |
|
||||
| `trained_words`| Set[str] | Trigger words for this model, if any |
|
||||
| `download_url` | AnyHttpUrl | URL for downloading this version of the model |
|
||||
| `base_model_trained_on` | str | Name of the model that this version was trained on |
|
||||
| `thumbnail_url` | AnyHttpUrl | URL to access a representative thumbnail image of the model's output |
|
||||
| `weight_min` | int | For LoRA sliders, the minimum suggested weight to apply |
|
||||
| `weight_max` | int | For LoRA sliders, the maximum suggested weight to apply |
|
||||
|
||||
Note that `weight_min` and `weight_max` are not currently populated
|
||||
and take the default values of (-1.0, +2.0). The issue is that these
|
||||
values aren't part of the structured data but appear in the text
|
||||
description. Some regular expression or LLM coding may be able to
|
||||
extract these values.
|
||||
|
||||
Also be aware that `base_model_trained_on` is free text and doesn't
|
||||
correspond to our `ModelType` enum.
|
||||
|
||||
`CivitaiMetadata` also defines some convenience properties relating to
|
||||
licensing restrictions: `credit_required`, `allow_commercial_use`,
|
||||
`allow_derivatives` and `allow_different_license`.
|
||||
|
||||
#### `AnyModelRepoMetadata`
|
||||
|
||||
This is a discriminated Union of `CivitaiMetadata` and
|
||||
`HuggingFaceMetadata`.
|
||||
This is a discriminated Union of `HuggingFaceMetadata`.
|
||||
|
||||
### Fetching Metadata from Online Repos
|
||||
|
||||
The `HuggingFaceMetadataFetch` and `CivitaiMetadataFetch` classes will
|
||||
retrieve metadata from their corresponding repositories and return
|
||||
The `HuggingFaceMetadataFetch` class will
|
||||
retrieve metadata from its corresponding repository and return
|
||||
`AnyModelRepoMetadata` objects. Their base class
|
||||
`ModelMetadataFetchBase` is an abstract class that defines two
|
||||
methods: `from_url()` and `from_id()`. The former accepts the type of
|
||||
@ -1397,96 +1303,17 @@ provide a `requests.Session` argument. This allows you to customize
|
||||
the low-level HTTP fetch requests and is used, for instance, in the
|
||||
testing suite to avoid hitting the internet.
|
||||
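For instance, a minimal sketch of handing the fetcher a pre-configured session (the constructor argument and the printed fields are assumptions based on the description above; `<repo_id>` is a placeholder):

```
# Illustrative only: route the fetcher's HTTP calls through a custom
# requests.Session, e.g. to set headers or mock responses in tests.
# The session argument is assumed from the description above.
import requests

from invokeai.backend.model_manager.metadata import HuggingFaceMetadataFetch

session = requests.Session()
session.headers.update({"User-Agent": "invokeai-metadata-example"})

hf = HuggingFaceMetadataFetch(session)
model_metadata = hf.from_id("<repo_id>")  # placeholder repo id
print(model_metadata.author, model_metadata.tags)
```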
|
||||
The HuggingFace and Civitai fetcher subclasses add additional
|
||||
repo-specific fetching methods:
|
||||
The HuggingFace fetcher subclass adds additional repo-specific fetching methods:
|
||||
|
||||
#### HuggingFaceMetadataFetch
|
||||
|
||||
This overrides its base class `from_json()` method to return a
|
||||
`HuggingFaceMetadata` object directly.
|
||||
|
||||
#### CivitaiMetadataFetch
|
||||
|
||||
This adds the following methods:
|
||||
|
||||
`from_civitai_modelid()` This takes the ID of a model, finds the
|
||||
default version of the model, and then retrieves the metadata for
|
||||
that version, returning a `CivitaiMetadata` object directly.
|
||||
|
||||
`from_civitai_versionid()` This takes the ID of a model version and
|
||||
retrieves its metadata. Functionally equivalent to `from_id()`, the
|
||||
only difference is that it returns a `CivitaiMetadata` object rather
|
||||
than an `AnyModelRepoMetadata`.
|
||||
|
||||
### Metadata Storage
|
||||
|
||||
The `ModelMetadataStore` provides a simple facility to store model
|
||||
metadata in the `invokeai.db` database. The data is stored as a JSON
|
||||
blob, with a few common fields (`name`, `author`, `tags`) broken out
|
||||
to be searchable.
|
||||
|
||||
When a metadata object is saved to the database, it is identified
|
||||
using the model key, _and this key must correspond to an existing
|
||||
model key in the model_config table_. There is a foreign key integrity
|
||||
constraint between the `model_config.id` field and the
|
||||
`model_metadata.id` field such that if you attempt to save metadata
|
||||
under an unknown key, the attempt will result in an
|
||||
`UnknownModelException`. Likewise, when a model is deleted from
|
||||
`model_config`, the deletion of the corresponding metadata record will
|
||||
be triggered.
|
||||
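A minimal sketch of what that constraint means in practice (the exception's import path is an assumption; `metadata_store` and `model_metadata` are created as in the surrounding examples):

```
# Sketch only: add_metadata() with a key that has no matching record in
# the model_config table raises UnknownModelException.
# The import path below is an assumption.
from invokeai.app.services.model_records import UnknownModelException

try:
    metadata_store.add_metadata("no-such-model-key", model_metadata)
except UnknownModelException:
    print("metadata can only be attached to an existing model_config record")
```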
|
||||
Tags are stored in a normalized fashion in the tables `model_tags` and
|
||||
`tags`. Triggers keep the tag table in sync with the `model_metadata`
|
||||
table.
|
||||
|
||||
To create the storage object, initialize it with the InvokeAI
|
||||
`SqliteDatabase` object. This is often done this way:
|
||||
|
||||
```
|
||||
from invokeai.app.api.dependencies import ApiDependencies
|
||||
metadata_store = ModelMetadataStore(ApiDependencies.invoker.services.db)
|
||||
```
|
||||
|
||||
You can then access the storage with the following methods:
|
||||
|
||||
#### `add_metadata(key, metadata)`
|
||||
|
||||
Add the metadata using a previously-defined model key.
|
||||
|
||||
There is currently no `delete_metadata()` method. The metadata will
|
||||
persist until the matching config is deleted from the `model_config`
|
||||
table.
|
||||
|
||||
#### `get_metadata(key) -> AnyModelRepoMetadata`
|
||||
|
||||
Retrieve the metadata corresponding to the model key.
|
||||
|
||||
#### `update_metadata(key, new_metadata)`
|
||||
|
||||
Update an existing metadata record with new metadata.
|
||||
|
||||
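Taken together, a small round-trip sketch re-using the model key from the earlier example (assumes `tags` is a set of strings, as implied by `search_by_tag`):

```
# Illustrative round trip using the methods described above.
key = "fb237ace520b6716adc98bcb16e8462c"           # an existing model key
model_metadata = metadata_store.get_metadata(key)  # -> AnyModelRepoMetadata
model_metadata.tags.add("favorite")                # tags assumed to be a set
metadata_store.update_metadata(key, model_metadata)
```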
#### `search_by_tag(tags: Set[str]) -> Set[str]`
|
||||
|
||||
Given a set of tags, find models that are tagged with them. If
|
||||
multiple tags are provided then a matching model must be tagged with
|
||||
*all* the tags in the set. This method returns a set of model keys and
|
||||
is intended to be used in conjunction with the `ModelRecordService`:
|
||||
|
||||
```
|
||||
model_config_store = ApiDependencies.invoker.services.model_records
|
||||
matches = metadata_store.search_by_tag({'license:other'})
|
||||
models = [model_config_store.get(x) for x in matches]
|
||||
```
|
||||
|
||||
#### `search_by_name(name: str) -> Set[str]`
|
||||
|
||||
Find all model metadata records that have the given name and return a
|
||||
set of keys to the corresponding model config objects.
|
||||
|
||||
#### `search_by_author(author: str) -> Set[str]`
|
||||
|
||||
Find all model metadata records that have the given author and return
|
||||
a set of keys to the corresponding model config objects.
|
||||
The `ModelConfigBase` stores this response in the `source_api_response` field
|
||||
as a JSON blob.
|
||||
|
||||
***
|
||||
|
||||
|
133  docs/contributing/frontend/OVERVIEW.md (normal file)
@ -0,0 +1,133 @@
|
||||
# Invoke UI
|
||||
|
||||
Invoke's UI is made possible by many contributors and open-source libraries. Thank you!
|
||||
|
||||
## Dev environment
|
||||
|
||||
### Setup
|
||||
|
||||
1. Install [node] and [pnpm].
|
||||
1. Run `pnpm i` to install all packages.
|
||||
|
||||
#### Run in dev mode
|
||||
|
||||
1. From `invokeai/frontend/web/`, run `pnpm dev`.
|
||||
1. From repo root, run `python scripts/invokeai-web.py`.
|
||||
1. Point your browser to the dev server address, e.g. <http://localhost:5173/>
|
||||
|
||||
### Package scripts
|
||||
|
||||
- `dev`: run the frontend in dev mode, enabling hot reloading
|
||||
- `build`: run all checks (madge, eslint, prettier, tsc) and then build the frontend
|
||||
- `typegen`: generate types from the OpenAPI schema (see [Type generation])
|
||||
- `lint:dpdm`: check circular dependencies
|
||||
- `lint:eslint`: check code quality
|
||||
- `lint:prettier`: check code formatting
|
||||
- `lint:tsc`: check type issues
|
||||
- `lint:knip`: check for unused exports or objects (failures here are just suggestions, not hard fails)
|
||||
- `lint`: run all checks concurrently
|
||||
- `fix`: run `eslint` and `prettier`, fixing fixable issues
|
||||
|
||||
### Type generation
|
||||
|
||||
We use [openapi-typescript] to generate types from the app's OpenAPI schema.
|
||||
|
||||
The generated types are committed to the repo in [schema.ts].
|
||||
|
||||
```sh
|
||||
# from the repo root, start the server
|
||||
python scripts/invokeai-web.py
|
||||
# from invokeai/frontend/web/, run the script
|
||||
pnpm typegen
|
||||
```
|
||||
|
||||
### Localization
|
||||
|
||||
We use [i18next] for localization, but translation to languages other than English happens on our [Weblate] project.
|
||||
|
||||
Only the English source strings should be changed on this repo.
|
||||
|
||||
### VSCode
|
||||
|
||||
#### Example debugger config
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"type": "chrome",
|
||||
"request": "launch",
|
||||
"name": "Invoke UI",
|
||||
"url": "http://localhost:5173",
|
||||
"webRoot": "${workspaceFolder}/invokeai/frontend/web"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Remote dev
|
||||
|
||||
We've noticed an intermittent timeout issue with the VSCode remote dev port forwarding.
|
||||
|
||||
We suggest disabling the editor's port forwarding feature and doing it manually via SSH:
|
||||
|
||||
```sh
|
||||
ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host
|
||||
```
|
||||
|
||||
## Contributing Guidelines
|
||||
|
||||
Thanks for your interest in contributing to the Invoke Web UI!
|
||||
|
||||
Please follow these guidelines when contributing.
|
||||
|
||||
### Check in before investing your time
|
||||
|
||||
Please check in before you invest your time on anything besides a trivial fix, in case it conflicts with ongoing work or isn't aligned with the vision for the app.
|
||||
|
||||
If a feature request or issue doesn't already exist for the thing you want to work on, please create one.
|
||||
|
||||
Ping `@psychedelicious` on [discord] in the `#frontend-dev` channel or in the feature request / issue you want to work on - we're happy to chat.
|
||||
|
||||
### Code conventions
|
||||
|
||||
- This is a fairly complex app with a deep component tree. Please use memoization (`useCallback`, `useMemo`, `memo`) with enthusiasm.
|
||||
- If you need to add some global, ephemeral state, please use [nanostores] if possible.
|
||||
- Be careful with your redux selectors. If they need to be parameterized, consider creating them inside a `useMemo`.
|
||||
- Feel free to use `lodash` (via `lodash-es`) to make the intent of your code clear.
|
||||
- Please add comments describing the "why", not the "how" (unless it is really arcane).
|
||||
|
||||
### Commit format
|
||||
|
||||
Please use the [conventional commits] spec for the web UI, with a scope of "ui":
|
||||
|
||||
- `chore(ui): bump deps`
|
||||
- `chore(ui): lint`
|
||||
- `feat(ui): add some cool new feature`
|
||||
- `fix(ui): fix some bug`
|
||||
|
||||
### Submitting a PR
|
||||
|
||||
- Ensure your branch is tidy. Use an interactive rebase to clean up the commit history and reword the commit messages if they are not descriptive.
|
||||
- Run `pnpm lint`. Some issues are auto-fixable with `pnpm fix`.
|
||||
- Fill out the PR form when creating the PR.
|
||||
- It doesn't need to be super detailed, but a screenshot or video is nice if you changed something visually.
|
||||
- If a section isn't relevant, delete it. There are no UI tests at this time.
|
||||
|
||||
## Other docs
|
||||
|
||||
- [Workflows - Design and Implementation]
|
||||
- [State Management]
|
||||
|
||||
[node]: https://nodejs.org/en/download/
|
||||
[pnpm]: https://github.com/pnpm/pnpm
|
||||
[discord]: https://discord.gg/ZmtBAhwWhy
|
||||
[i18next]: https://github.com/i18next/react-i18next
|
||||
[Weblate]: https://hosted.weblate.org/engage/invokeai/
|
||||
[openapi-typescript]: https://github.com/drwpow/openapi-typescript
|
||||
[Type generation]: #type-generation
|
||||
[schema.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/services/api/schema.ts
|
||||
[conventional commits]: https://www.conventionalcommits.org/en/v1.0.0/
|
||||
[Workflows - Design and Implementation]: ./WORKFLOWS.md
|
||||
[State Management]: ./STATE_MGMT.md
|
@ -1,40 +1,5 @@
|
||||
# Workflows - Design and Implementation
|
||||
|
||||
<!-- @import "[TOC]" {cmd="toc" depthFrom=1 depthTo=6 orderedList=false} -->
|
||||
|
||||
<!-- code_chunk_output -->
|
||||
|
||||
- [Workflows - Design and Implementation](#workflows---design-and-implementation)
|
||||
- [Design](#design)
|
||||
- [Linear UI](#linear-ui)
|
||||
- [Workflow Editor](#workflow-editor)
|
||||
- [Workflows](#workflows)
|
||||
- [Workflow -> reactflow state -> InvokeAI graph](#workflow---reactflow-state---invokeai-graph)
|
||||
- [Nodes vs Invocations](#nodes-vs-invocations)
|
||||
- [Workflow Linear View](#workflow-linear-view)
|
||||
- [OpenAPI Schema](#openapi-schema)
|
||||
- [Field Instances and Templates](#field-instances-and-templates)
|
||||
- [Stateful vs Stateless Fields](#stateful-vs-stateless-fields)
|
||||
- [Collection and Polymorphic Fields](#collection-and-polymorphic-fields)
|
||||
- [Implementation](#implementation)
|
||||
- [zod Schemas and Types](#zod-schemas-and-types)
|
||||
- [OpenAPI Schema Parsing](#openapi-schema-parsing)
|
||||
- [Parsing Field Types](#parsing-field-types)
|
||||
- [Primitive Types](#primitive-types)
|
||||
- [Complex Types](#complex-types)
|
||||
- [Collection Types](#collection-types)
|
||||
- [Collection or Scalar Types](#collection-or-scalar-types)
|
||||
- [Optional Fields](#optional-fields)
|
||||
- [Building Field Input Templates](#building-field-input-templates)
|
||||
- [Building Field Output Templates](#building-field-output-templates)
|
||||
- [Managing reactflow State](#managing-reactflow-state)
|
||||
- [Building Nodes and Edges](#building-nodes-and-edges)
|
||||
- [Building a Workflow](#building-a-workflow)
|
||||
- [Loading a Workflow](#loading-a-workflow)
|
||||
- [Workflow Migrations](#workflow-migrations)
|
||||
|
||||
<!-- /code_chunk_output -->
|
||||
|
||||
> This document describes, at a high level, the design and implementation of workflows in the InvokeAI frontend. There are a substantial number of implementation details not included, but which are hopefully clear from the code.
|
||||
|
||||
InvokeAI's backend uses graphs, composed of **nodes** and **edges**, to process data and generate images.
|
||||
@ -152,13 +117,13 @@ Stateless fields do not store their value in the node, so their field instances
|
||||
|
||||
"Custom" fields will always be treated as stateless fields.
|
||||
|
||||
##### Collection and Polymorphic Fields
|
||||
##### Collection and Scalar Fields
|
||||
|
||||
Field types have a name and two flags which may identify it as a **collection** or **polymorphic** field.
|
||||
Field types have a name and two flags which may identify it as a **collection** or **collection or scalar** field.
|
||||
|
||||
If a field is annotated in python as a list, its field type is parsed and flagged as a collection type (e.g. `list[int]`).
|
||||
If a field is annotated in python as a list, its field type is parsed and flagged as a **collection** type (e.g. `list[int]`).
|
||||
|
||||
If it is annotated as a union of a type and list, the type will be flagged as a polymorphic type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
|
||||
If it is annotated as a union of a type and list, the type will be flagged as a **collection or scalar** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
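
For reference, here is a minimal sketch of how these annotations might appear on a backend invocation. The node, its fields, and the import paths are hypothetical examples used only to illustrate the parsing rules above, and the exact module locations may differ between versions:

```py
from typing import Union

from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import InputField


@invocation("example_fields", title="Example Fields", version="1.0.0")
class ExampleFieldsInvocation(BaseInvocation):
    """Hypothetical node illustrating how python annotations map to frontend field types."""

    # Plain annotation -> parsed as a scalar integer field.
    count: int = InputField(default=1)
    # list[...] annotation -> parsed and flagged as a collection field.
    counts: list[int] = InputField(default=[])
    # Union[T, list[T]] annotation -> parsed and flagged as a collection or scalar field.
    count_or_counts: Union[int, list[int]] = InputField(default=1)
```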
|
||||
|
||||
## Implementation
|
||||
|
||||
@ -338,13 +303,13 @@ Migration logic is in [migrations.ts].
|
||||
[reactflow]: https://github.com/xyflow/xyflow 'reactflow'
|
||||
[reactflow-concepts]: https://reactflow.dev/learn/concepts/terms-and-definitions
|
||||
[reactflow-events]: https://reactflow.dev/api-reference/react-flow#event-handlers
|
||||
[buildWorkflow.ts]: ../src/features/nodes/util/workflow/buildWorkflow.ts
|
||||
[nodesSlice.ts]: ../src/features/nodes/store/nodesSlice.ts
|
||||
[buildLinearTextToImageGraph.ts]: ../src/features/nodes/util/graph/buildLinearTextToImageGraph.ts
|
||||
[buildNodesGraph.ts]: ../src/features/nodes/util/graph/buildNodesGraph.ts
|
||||
[buildInvocationNode.ts]: ../src/features/nodes/util/node/buildInvocationNode.ts
|
||||
[validateWorkflow.ts]: ../src/features/nodes/util/workflow/validateWorkflow.ts
|
||||
[migrations.ts]: ../src/features/nodes/util/workflow/migrations.ts
|
||||
[parseSchema.ts]: ../src/features/nodes/util/schema/parseSchema.ts
|
||||
[buildFieldInputTemplate.ts]: ../src/features/nodes/util/schema/buildFieldInputTemplate.ts
|
||||
[buildFieldOutputTemplate.ts]: ../src/features/nodes/util/schema/buildFieldOutputTemplate.ts
|
||||
[buildWorkflow.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/workflow/buildWorkflow.ts
|
||||
[nodesSlice.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/store/nodesSlice.ts
|
||||
[buildLinearTextToImageGraph.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/graph/buildLinearTextToImageGraph.ts
|
||||
[buildNodesGraph.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/graph/buildNodesGraph.ts
|
||||
[buildInvocationNode.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/node/buildInvocationNode.ts
|
||||
[validateWorkflow.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/workflow/validateWorkflow.ts
|
||||
[migrations.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/workflow/migrations.ts
|
||||
[parseSchema.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/schema/parseSchema.ts
|
||||
[buildFieldInputTemplate.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldInputTemplate.ts
|
||||
[buildFieldOutputTemplate.ts]: https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/src/features/nodes/util/schema/buildFieldOutputTemplate.ts
|
@ -6,259 +6,161 @@ title: Configuration
|
||||
|
||||
## Intro
|
||||
|
||||
InvokeAI has numerous runtime settings which can be used to adjust
|
||||
many aspects of its operations, including the location of files and
|
||||
directories, memory usage, and performance. These settings can be
|
||||
viewed and customized in several ways:
|
||||
Runtime settings, including the location of files and
|
||||
directories, memory usage, and performance, are managed via the
|
||||
`invokeai.yaml` config file or environment variables. A subset
|
||||
of settings may be set via commandline arguments.
|
||||
|
||||
1. By editing settings in the `invokeai.yaml` file.
|
||||
2. By setting environment variables.
|
||||
3. On the command-line, when InvokeAI is launched.
|
||||
Settings sources are used in this order:
|
||||
|
||||
In addition, the most commonly changed settings are accessible
|
||||
graphically via the `invokeai-configure` script.
|
||||
- CLI args
|
||||
- Environment variables
|
||||
- `invokeai.yaml` settings
|
||||
- Fallback: defaults
|
||||
|
||||
### How the Configuration System Works
|
||||
### InvokeAI Root Directory
|
||||
|
||||
When InvokeAI is launched, the very first thing it needs to do is to
|
||||
find its "root" directory, which contains its configuration files,
|
||||
installed models, its database of images, and the folder(s) of
|
||||
generated images themselves. In this document, the root directory will
|
||||
be referred to as ROOT.
|
||||
On startup, InvokeAI searches for its "root" directory. This is the directory
|
||||
that contains models, images, the database, and so on. It also contains
|
||||
a configuration file called `invokeai.yaml`.
|
||||
|
||||
#### Finding the Root Directory
|
||||
InvokeAI searches for the root directory in this order:
|
||||
|
||||
To find its root directory, InvokeAI uses the following recipe:
|
||||
1. The `--root <path>` CLI arg.
|
||||
2. The environment variable INVOKEAI_ROOT.
|
||||
3. The directory containing the currently active virtual environment.
|
||||
4. Fallback: a directory in the current user's home directory named `invokeai`.
|
||||
|
||||
1. It first looks for the argument `--root <path>` on the command line
|
||||
it was launched from, and uses the indicated path if present.
|
||||
### InvokeAI Configuration File
|
||||
|
||||
2. Next it looks for the environment variable INVOKEAI_ROOT, and uses
|
||||
the directory path found there if present.
|
||||
Inside the root directory, we read settings from the `invokeai.yaml` file.
|
||||
|
||||
3. If neither of these are present, then InvokeAI looks for the
|
||||
folder containing the `.venv` Python virtual environment directory for
|
||||
the currently active environment. This directory is checked for files
|
||||
expected inside the InvokeAI root before it is used.
|
||||
It has two sections - one for internal use and one for user settings:
|
||||
|
||||
4. Finally, InvokeAI looks for a directory in the current user's home
|
||||
directory named `invokeai`.
|
||||
```yaml
|
||||
# Internal metadata - do not edit:
|
||||
schema_version: 4
|
||||
|
||||
#### Reading the InvokeAI Configuration File
|
||||
|
||||
Once the root directory has been located, InvokeAI looks for a file
|
||||
named `ROOT/invokeai.yaml`, and if present reads configuration values
|
||||
from it. The top of this file looks like this:
|
||||
|
||||
```
|
||||
InvokeAI:
|
||||
Web Server:
|
||||
host: localhost
|
||||
port: 9090
|
||||
allow_origins: []
|
||||
allow_credentials: true
|
||||
allow_methods:
|
||||
- '*'
|
||||
allow_headers:
|
||||
- '*'
|
||||
Features:
|
||||
esrgan: true
|
||||
internet_available: true
|
||||
log_tokenization: false
|
||||
patchmatch: true
|
||||
restore: true
|
||||
...
|
||||
# Put user settings here - see https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/:
|
||||
host: 0.0.0.0 # serve the app on your local network
|
||||
models_dir: D:\invokeai\models # store models on an external drive
|
||||
precision: float16 # always use fp16 precision
|
||||
```
|
||||
|
||||
The lines in this file are used to establish default values for
|
||||
Invoke's settings. In the above fragment, the Web Server's listening
|
||||
port is set to 9090 by the `port` setting.
|
||||
The settings in this file will override the defaults. You only need
|
||||
to change this file if the default for a particular setting doesn't
|
||||
work for you.
|
||||
|
||||
You can edit this file with a text editor such as "Notepad" (do not
|
||||
use Word or any other word processor). When editing, be careful to
|
||||
maintain the indentation, and do not add extraneous text, as syntax
|
||||
errors will prevent InvokeAI from launching. A basic guide to the
|
||||
format of YAML files can be found
|
||||
[here](https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/).
|
||||
Some settings, like [Model Marketplace API Keys], require the YAML
|
||||
to be formatted correctly. Here is a [basic guide to YAML files].
|
||||
|
||||
You can fix a broken `invokeai.yaml` by deleting it and running the
|
||||
configuration script again -- option [6] in the launcher, "Re-run the
|
||||
configure script".
|
||||
|
||||
#### Reading Environment Variables
|
||||
#### Custom Config File Location
|
||||
|
||||
Next InvokeAI looks for defined environment variables in the format
|
||||
`INVOKEAI_<setting_name>`, for example `INVOKEAI_port`. Environment
|
||||
variable values take precedence over configuration file variables. On
|
||||
a Macintosh system, for example, you could change the port that the
|
||||
web server listens on by setting the environment variable this way:
|
||||
You can use any config file with the `--config` CLI arg. Pass in the path to the `invokeai.yaml` file you want to use.
|
||||
|
||||
```
|
||||
export INVOKEAI_port=8000
|
||||
invokeai-web
|
||||
Note that environment variables will trump any settings in the config file.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
All settings may be set via environment variables by prefixing `INVOKEAI_`
|
||||
to the variable name. For example, `INVOKEAI_HOST` would set the `host`
|
||||
setting.
|
||||
|
||||
For non-primitive values, pass a JSON-encoded string:
|
||||
|
||||
```sh
|
||||
export INVOKEAI_REMOTE_API_TOKENS='[{"url_regex":"modelmarketplace", "token": "12345"}]'
|
||||
```
|
||||
|
||||
Please check out these
|
||||
[Macintosh](https://phoenixnap.com/kb/set-environment-variable-mac)
|
||||
and
|
||||
[Windows](https://phoenixnap.com/kb/windows-set-environment-variable)
|
||||
guides for setting temporary and permanent environment variables.
|
||||
We suggest using `invokeai.yaml`, as it is more user-friendly.
|
||||
|
||||
#### Reading the Command Line
|
||||
### CLI Args
|
||||
|
||||
Lastly, InvokeAI takes settings from the command line, which override
|
||||
everything else. The command-line settings have the same name as the
|
||||
corresponding configuration file settings, preceded by a `--`, for
|
||||
example `--port 8000`.
|
||||
A subset of settings may be specified using CLI args:
|
||||
|
||||
If you are using the launcher (`invoke.sh` or `invoke.bat`) to launch
|
||||
InvokeAI, then just pass the command-line arguments to the launcher:
|
||||
- `--root`: specify the root directory
|
||||
- `--config`: override the default `invokeai.yaml` file location
|
||||
|
||||
```
|
||||
invoke.bat --port 8000 --host 0.0.0.0
|
||||
### All Settings
|
||||
|
||||
Following the list of settings are additional explanations for certain settings.
|
||||
|
||||
<!-- prettier-ignore-start -->
|
||||
::: invokeai.app.services.config.config_default.InvokeAIAppConfig
|
||||
options:
|
||||
heading_level: 4
|
||||
members: false
|
||||
show_docstring_description: false
|
||||
group_by_category: true
|
||||
show_category_heading: false
|
||||
<!-- prettier-ignore-end -->
|
||||
|
||||
#### Model Marketplace API Keys
|
||||
|
||||
Some model marketplaces require an API key to download models. You can provide a URL pattern and appropriate token in your `invokeai.yaml` file to provide that API key.
|
||||
|
||||
The pattern can be any valid regex (you may need to surround the pattern with quotes):
|
||||
|
||||
```yaml
|
||||
remote_api_tokens:
|
||||
# Any URL containing `models.com` will automatically use `your_models_com_token`
|
||||
- url_regex: models.com
|
||||
token: your_models_com_token
|
||||
# Any URL matching this contrived regex will use `some_other_token`
|
||||
- url_regex: '^[a-z]{3}whatever.*\.com$'
|
||||
token: some_other_token
|
||||
```
|
||||
|
||||
The arguments will be applied when you select the web server option
|
||||
(and the other options as well).
|
||||
The provided token will be added as a `Bearer` token to the network requests to download the model files. As far as we know, this works for all model marketplaces that require authorization.
|
||||
|
||||
If, on the other hand, you prefer to launch InvokeAI directly from the
|
||||
command line, you would first activate the virtual environment (known
|
||||
as the "developer's console" in the launcher), and run `invokeai-web`:
|
||||
#### Model Hashing
|
||||
|
||||
```
|
||||
> C:\Users\Fred\invokeai\.venv\scripts\activate
|
||||
(.venv) > invokeai-web --port 8000 --host 0.0.0.0
|
||||
Models are hashed during installation, providing a stable identifier for models across all platforms. Hashing is a one-time operation.
|
||||
|
||||
```yaml
|
||||
hashing_algorithm: blake3_single # default value
|
||||
```
|
||||
|
||||
You can get a listing and brief instructions for each of the
|
||||
command-line options by giving the `--help` argument:
|
||||
You might want to change this setting, depending on your system:
|
||||
|
||||
```
|
||||
(.venv) > invokeai-web --help
|
||||
usage: InvokeAI [-h] [--host HOST] [--port PORT] [--allow_origins [ALLOW_ORIGINS ...]] [--allow_credentials | --no-allow_credentials] [--allow_methods [ALLOW_METHODS ...]]
|
||||
[--allow_headers [ALLOW_HEADERS ...]] [--esrgan | --no-esrgan] [--internet_available | --no-internet_available] [--log_tokenization | --no-log_tokenization]
|
||||
[--patchmatch | --no-patchmatch] [--restore | --no-restore]
|
||||
[--always_use_cpu | --no-always_use_cpu] [--free_gpu_mem | --no-free_gpu_mem] [--max_loaded_models MAX_LOADED_MODELS] [--max_cache_size MAX_CACHE_SIZE]
|
||||
[--max_vram_cache_size MAX_VRAM_CACHE_SIZE] [--gpu_mem_reserved GPU_MEM_RESERVED] [--precision {auto,float16,float32,autocast}]
|
||||
[--sequential_guidance | --no-sequential_guidance] [--xformers_enabled | --no-xformers_enabled] [--tiled_decode | --no-tiled_decode] [--root ROOT]
|
||||
[--autoimport_dir AUTOIMPORT_DIR] [--lora_dir LORA_DIR] [--embedding_dir EMBEDDING_DIR] [--controlnet_dir CONTROLNET_DIR] [--conf_path CONF_PATH]
|
||||
[--models_dir MODELS_DIR] [--legacy_conf_dir LEGACY_CONF_DIR] [--db_dir DB_DIR] [--outdir OUTDIR] [--from_file FROM_FILE]
|
||||
[--use_memory_db | --no-use_memory_db] [--model MODEL] [--log_handlers [LOG_HANDLERS ...]] [--log_format {plain,color,syslog,legacy}]
|
||||
[--log_level {debug,info,warning,error,critical}] [--version | --no-version]
|
||||
```
|
||||
- `blake3_single` (default): Single-threaded - best for spinning HDDs, still OK for SSDs
|
||||
- `blake3_multi`: Parallelized, memory-mapped implementation - best for SSDs, terrible for spinning disks
|
||||
- `random`: Skip hashing entirely - fastest but of course no hash
|
||||
|
||||
## The Configuration Settings
|
||||
During the first startup after upgrading to v4, all of your models will be hashed. This can take a few minutes.
|
||||
|
||||
The configuration settings are divided into several distinct
|
||||
groups in `invokeai.yaml`:
|
||||
Most common algorithms are supported, like `md5`, `sha256`, and `sha512`. These are typically much, much slower than either of the BLAKE3 variants.
|
||||
|
||||
### Web Server
|
||||
|
||||
| Setting | Default Value | Description |
|
||||
|---------------------|---------------|----------------------------------------------------------------------------------------------------------------------------|
|
||||
| `host` | `localhost` | Name or IP address of the network interface that the web server will listen on |
|
||||
| `port` | `9090` | Network port number that the web server will listen on |
|
||||
| `allow_origins` | `[]` | A list of host names or IP addresses that are allowed to connect to the InvokeAI API in the format `['host1','host2',...]` |
|
||||
| `allow_credentials` | `true` | Require credentials for a foreign host to access the InvokeAI API (don't change this) |
|
||||
| `allow_methods` | `*` | List of HTTP methods ("GET", "POST") that the web server is allowed to use when accessing the API |
|
||||
| `allow_headers` | `*` | List of HTTP headers that the web server will accept when accessing the API |
|
||||
| `ssl_certfile` | null | Path to an SSL certificate file, used to enable HTTPS. |
|
||||
| `ssl_keyfile` | null | Path to an SSL keyfile, if the key is not included in the certificate file. |
|
||||
|
||||
The documentation for InvokeAI's API can be accessed by browsing to the following URL: [http://localhost:9090/docs].
|
||||
|
||||
### Features
|
||||
|
||||
These configuration settings allow you to enable and disable various InvokeAI features:
|
||||
|
||||
| Setting | Default Value | Description |
|
||||
|----------|----------------|--------------|
|
||||
| `esrgan` | `true` | Activate the ESRGAN upscaling options|
|
||||
| `internet_available` | `true` | When a resource is not available locally, try to fetch it via the internet |
|
||||
| `log_tokenization` | `false` | Before each text2image generation, print a color-coded representation of the prompt to the console; this can help understand why a prompt is not working as expected |
|
||||
| `patchmatch` | `true` | Activate the "patchmatch" algorithm for improved inpainting |
|
||||
|
||||
### Generation
|
||||
|
||||
These options tune InvokeAI's memory and performance characteristics.
|
||||
|
||||
| Setting | Default Value | Description |
|
||||
|-----------------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `sequential_guidance` | `false` | Calculate guidance in serial rather than in parallel, lowering memory requirements at the cost of some performance loss |
|
||||
| `attention_type` | `auto` | Select the type of attention to use. One of `auto`,`normal`,`xformers`,`sliced`, or `torch-sdp` |
|
||||
| `attention_slice_size` | `auto` | When "sliced" attention is selected, set the slice size. One of `auto`, `balanced`, `max` or the integers 1-8|
|
||||
| `force_tiled_decode` | `false` | Force the VAE step to decode in tiles, reducing memory consumption at the cost of performance |
|
||||
|
||||
### Device
|
||||
|
||||
These options configure the generation execution device.
|
||||
|
||||
| Setting | Default Value | Description |
|
||||
|-----------------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `device` | `auto` | Preferred execution device. One of `auto`, `cpu`, `cuda`, `cuda:1`, `mps`. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. |
|
||||
| `precision` | `auto` | Floating point precision. One of `auto`, `float16` or `float32`. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system |
|
||||
|
||||
|
||||
### Paths
|
||||
#### Path Settings
|
||||
|
||||
These options set the paths of various directories and files used by
|
||||
InvokeAI. Relative paths are interpreted relative to INVOKEAI_ROOT, so
|
||||
if INVOKEAI_ROOT is `/home/fred/invokeai` and the path is
|
||||
InvokeAI. Relative paths are interpreted relative to the root directory, so
|
||||
if root is `/home/fred/invokeai` and the path is
|
||||
`autoimport/main`, then the corresponding directory will be located at
|
||||
`/home/fred/invokeai/autoimport/main`.
|
||||
|
||||
| Setting | Default Value | Description |
|
||||
|----------|----------------|--------------|
|
||||
| `autoimport_dir` | `autoimport/main` | At startup time, read and import any main model files found in this directory |
|
||||
| `lora_dir` | `autoimport/lora` | At startup time, read and import any LoRA/LyCORIS models found in this directory |
|
||||
| `embedding_dir` | `autoimport/embedding` | At startup time, read and import any textual inversion (embedding) models found in this directory |
|
||||
| `controlnet_dir` | `autoimport/controlnet` | At startup time, read and import any ControlNet models found in this directory |
|
||||
| `conf_path` | `configs/models.yaml` | Location of the `models.yaml` model configuration file |
|
||||
| `models_dir` | `models` | Location of the directory containing models installed by InvokeAI's model manager |
|
||||
| `legacy_conf_dir` | `configs/stable-diffusion` | Location of the directory containing the .yaml configuration files for legacy checkpoint models |
|
||||
| `db_dir` | `databases` | Location of the directory containing InvokeAI's image, schema and session database |
|
||||
| `outdir` | `outputs` | Location of the directory in which the gallery of generated and uploaded images will be stored |
|
||||
| `use_memory_db` | `false` | Keep database information in memory rather than on disk; this will not preserve image gallery information across restarts |
|
||||
|
||||
Note that the autoimport directories will be searched recursively,
|
||||
Note that the autoimport directory will be searched recursively,
|
||||
allowing you to organize the models into folders and subfolders in any
|
||||
way you wish. In addition, while we have split up autoimport
|
||||
directories by the type of model they contain, this isn't
|
||||
necessary. You can combine different model types in the same folder
|
||||
and InvokeAI will figure out what they are. So you can easily use just
|
||||
one autoimport directory by commenting out the unneeded paths:
|
||||
way you wish.
|
||||
|
||||
```
|
||||
Paths:
|
||||
autoimport_dir: autoimport
|
||||
# lora_dir: null
|
||||
# embedding_dir: null
|
||||
# controlnet_dir: null
|
||||
```
|
||||
|
||||
### Logging
|
||||
|
||||
These settings control the information, warning, and debugging
|
||||
messages printed to the console log while InvokeAI is running:
|
||||
|
||||
| Setting | Default Value | Description |
|
||||
|----------|----------------|--------------|
|
||||
| `log_handlers` | `console` | This controls where log messages are sent, and can be a list of one or more destinations. Values include `console`, `file`, `syslog` and `http`. These are described in more detail below |
|
||||
| `log_format` | `color` | This controls the formatting of the log messages. Values are `plain`, `color`, `legacy` and `syslog` |
|
||||
| `log_level` | `debug` | This filters messages according to the level of severity and can be one of `debug`, `info`, `warning`, `error` and `critical`. For example, setting to `warning` will display all messages at the warning level or higher, but won't display "debug" or "info" messages |
|
||||
#### Logging
|
||||
|
||||
Several different log handler destinations are available, and multiple destinations are supported by providing a list:
|
||||
|
||||
```
|
||||
log_handlers:
|
||||
- console
|
||||
- syslog=localhost
|
||||
- file=/var/log/invokeai.log
|
||||
```yaml
|
||||
log_handlers:
|
||||
- console
|
||||
- syslog=localhost
|
||||
- file=/var/log/invokeai.log
|
||||
```
|
||||
|
||||
* `console` is the default. It prints log messages to the command-line window from which InvokeAI was launched.
|
||||
- `console` is the default. It prints log messages to the command-line window from which InvokeAI was launched.
|
||||
|
||||
* `syslog` is only available on Linux and Macintosh systems. It uses
|
||||
- `syslog` is only available on Linux and Macintosh systems. It uses
|
||||
the operating system's "syslog" facility to write log file entries
|
||||
locally or to a remote logging machine. `syslog` offers a variety
|
||||
of configuration options:
|
||||
@ -271,7 +173,7 @@ Several different log handler destinations are available, and multiple destinati
|
||||
- Log to LAN-connected server "fredserver" using the facility LOG_USER and datagram packets.
|
||||
```
|
||||
|
||||
* `http` can be used to log to a remote web server. The server must be
|
||||
- `http` can be used to log to a remote web server. The server must be
|
||||
properly configured to receive and act on log messages. The option
|
||||
accepts the URL to the web server, and a `method` argument
|
||||
indicating whether the message should be submitted using the GET or
|
||||
@ -283,7 +185,10 @@ Several different log handler destinations are available, and multiple destinati
|
||||
|
||||
The `log_format` option provides several alternative formats:
|
||||
|
||||
* `color` - default format providing time, date and a message, using text colors to distinguish different log severities
|
||||
* `plain` - same as above, but monochrome text only
|
||||
* `syslog` - the log level and error message only, allowing the syslog system to attach the time and date
|
||||
* `legacy` - a format similar to the one used by the legacy 2.3 InvokeAI releases.
|
||||
- `color` - default format providing time, date and a message, using text colors to distinguish different log severities
|
||||
- `plain` - same as above, but monochrome text only
|
||||
- `syslog` - the log level and error message only, allowing the syslog system to attach the time and date
|
||||
- `legacy` - a format similar to the one used by the legacy 2.3 InvokeAI releases.
|
||||
|
||||
[basic guide to yaml files]: https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/
|
||||
[Model Marketplace API Keys]: #model-marketplace-api-keys
|
||||
|
35
docs/features/DATABASE.md
Normal file
35
docs/features/DATABASE.md
Normal file
@ -0,0 +1,35 @@
|
||||
---
|
||||
title: Database
|
||||
---
|
||||
|
||||
# Invoke's SQLite Database
|
||||
|
||||
Invoke uses a SQLite database to store image, workflow, model, and execution data.
|
||||
|
||||
We take great care to ensure your data is safe, by utilizing transactions and a database migration system.
|
||||
|
||||
Even so, when testing a prerelease version of the app, we strongly suggest either backing up your database or using an in-memory database. This ensures any prerelease hiccups or database schema changes will not cause problems for your data.
|
||||
|
||||
## Database Backup
|
||||
|
||||
Backing up your database is very simple. Invoke's data is stored in an `$INVOKEAI_ROOT` directory - where your `invoke.sh`/`invoke.bat` and `invokeai.yaml` files live.
|
||||
|
||||
To back up your database, copy the `invokeai.db` file from `$INVOKEAI_ROOT/databases/invokeai.db` to somewhere safe.
|
||||
|
||||
If anything comes up during prerelease testing, you can simply copy your backup back into `$INVOKEAI_ROOT/databases/`.
|
||||
|
||||
## In-Memory Database
|
||||
|
||||
SQLite can run with an in-memory database. Your existing database is untouched when this mode is enabled, but your existing data won't be accessible.
|
||||
|
||||
This is very useful for testing, as there is no chance of a database change modifying your "physical" database.
|
||||
|
||||
To run Invoke with an in-memory database, edit your `invokeai.yaml` file and add `use_memory_db: true` as shown below:
|
||||
|
||||
```yaml
|
||||
InvokeAI:
|
||||
Development:
|
||||
use_memory_db: true
|
||||
```
|
||||
|
||||
Delete this line (or set it to `false`) to use your main database.
|
@ -122,9 +122,9 @@ experimental versions later.
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
|
||||
and look for a file named:
|
||||
|
||||
- InvokeAI-installer-v3.X.X.zip
|
||||
- InvokeAI-installer-v4.X.X.zip
|
||||
|
||||
where "3.X.X" is the latest released version. The file is located
|
||||
where "4.X.X" is the latest released version. The file is located
|
||||
at the very bottom of the release page, under **Assets**.
|
||||
|
||||
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
|
||||
@ -199,136 +199,7 @@ experimental versions later.
|
||||

|
||||
</figure>
|
||||
|
||||
10. **Post-install Configuration**: After installation completes, the
|
||||
installer will launch the configuration form, which will guide you
|
||||
through the first-time process of adjusting some of InvokeAI's
|
||||
startup settings. To move around this form use ctrl-N for
|
||||
<N>ext and ctrl-P for <P>revious, or use <tab>
|
||||
and shift-<tab> to move forward and back. Once you are in a
|
||||
multi-checkbox field use the up and down cursor keys to select the
|
||||
item you want, and <space> to toggle it on and off. Within
|
||||
a directory field, pressing <tab> will provide autocomplete
|
||||
options.
|
||||
|
||||
Generally the defaults are fine, and you can come back to this screen at
|
||||
any time to tweak your system. Here are the options you can adjust:
|
||||
|
||||
- ***HuggingFace Access Token***
|
||||
InvokeAI has the ability to download embedded styles and subjects
|
||||
from the HuggingFace Concept Library on-demand. However, some of
|
||||
the concept library files are password protected. To make download
|
||||
smoother, you can set up an account at huggingface.co, obtain an
|
||||
access token, and paste it into this field. Note that you paste
|
||||
to this screen using ctrl-shift-V
|
||||
|
||||
- ***Free GPU memory after each generation***
|
||||
This is useful for low-memory machines and helps minimize the
|
||||
amount of GPU VRAM used by InvokeAI.
|
||||
|
||||
- ***Enable xformers support if available***
|
||||
If the xformers library was successfully installed, this will activate
|
||||
it to reduce memory consumption and increase rendering speed noticeably.
|
||||
Note that xformers has the side effect of generating slightly different
|
||||
images even when presented with the same seed and other settings.
|
||||
|
||||
- ***Force CPU to be used on GPU systems***
|
||||
This will use the (slow) CPU rather than the accelerated GPU. This
|
||||
can be used to generate images on systems that don't have a compatible
|
||||
GPU.
|
||||
|
||||
- ***Precision***
|
||||
This controls whether to use float32 or float16 arithmetic.
|
||||
float16 uses less memory but is also slightly less accurate.
|
||||
Ordinarily the right arithmetic is picked automatically ("auto"),
|
||||
but you may have to use float32 to get images on certain systems
|
||||
and graphics cards. The "autocast" option is deprecated and
|
||||
shouldn't be used unless you are asked to by a member of the team.
|
||||
|
||||
- ***Size of the RAM cache used for fast model switching***
|
||||
This allows you to keep models in memory and switch rapidly among
|
||||
them rather than having them load from disk each time. This slider
|
||||
controls how many models to keep loaded at once. A typical SD-1 or SD-2 model
|
||||
uses 2-3 GB of memory. A typical SDXL model uses 6-7 GB. Providing more
|
||||
RAM will allow more models to be co-resident.
|
||||
|
||||
- ***Output directory for images***
|
||||
This is the path to a directory in which InvokeAI will store all its
|
||||
generated images.
|
||||
|
||||
- ***Autoimport Folder***
|
||||
This is the directory in which you can place models you have
|
||||
downloaded and wish to load into InvokeAI. You can place a variety
|
||||
of models in this directory, including diffusers folders, .ckpt files,
|
||||
.safetensors files, as well as LoRAs, ControlNet and Textual Inversion
|
||||
files (both folder and file versions). To help organize this folder,
|
||||
you can create several levels of subfolders and drop your models into
|
||||
whichever ones you want.
|
||||
|
||||
- ***LICENSE***
|
||||
|
||||
At the bottom of the screen you will see a checkbox for accepting
|
||||
the CreativeML Responsible AI Licenses. You need to accept the license
|
||||
in order to download Stable Diffusion models from the next screen.
|
||||
|
||||
_You can come back to the startup options form_ as many times as you like.
|
||||
From the `invoke.sh` or `invoke.bat` launcher, select option (6) to relaunch
|
||||
this script. On the command line, it is named `invokeai-configure`.
|
||||
|
||||
11. **Downloading Models**: After you press `[NEXT]` on the screen, you will be taken
|
||||
to another screen that prompts you to download a series of starter models. The ones
|
||||
we recommend are preselected for you, but you are encouraged to use the checkboxes to
|
||||
pick and choose.
|
||||
You will probably wish to download `autoencoder-840000` for use with models that
|
||||
were trained with an older version of the Stability VAE.
|
||||
|
||||
<figure markdown>
|
||||

|
||||
</figure>
|
||||
|
||||
Below the preselected list of starter models is a large text field which you can use
|
||||
to specify a series of models to import. You can specify models in a variety of formats,
|
||||
each separated by a space or newline. The formats accepted are:
|
||||
|
||||
- The path to a .ckpt or .safetensors file. On most systems, you can drag a file from
|
||||
the file browser to the textfield to automatically paste the path. Be sure to remove
|
||||
extraneous quotation marks and other things that come along for the ride.
|
||||
|
||||
- The path to a directory containing a combination of `.ckpt` and `.safetensors` files.
|
||||
The directory will be scanned from top to bottom (including subfolders) and any
|
||||
file that can be imported will be.
|
||||
|
||||
- A URL pointing to a `.ckpt` or `.safetensors` file. You can cut
|
||||
and paste directly from a web page, or simply drag the link from the web page
|
||||
or navigation bar. (You can also use ctrl-shift-V to paste into this field)
|
||||
The file will be downloaded and installed.
|
||||
|
||||
- The HuggingFace repository ID (repo_id) for a `diffusers` model. These IDs have
|
||||
the format _author_name/model_name_, as in `andite/anything-v4.0`
|
||||
|
||||
- The path to a local directory containing a `diffusers`
|
||||
model. These directories always have the file `model_index.json`
|
||||
at their top level.
|
||||
|
||||
_Select a directory for models to import_ You may select a local
|
||||
directory for autoimporting at startup time. If you select this
|
||||
option, the directory you choose will be scanned for new
|
||||
.ckpt/.safetensors files each time InvokeAI starts up, and any new
|
||||
files will be automatically imported and made available for your
|
||||
use.
|
||||
|
||||
_Convert imported models into diffusers_ When legacy checkpoint
|
||||
files are imported, you may select to use them unmodified (the
|
||||
default) or to convert them into `diffusers` models. The latter
|
||||
load much faster and have slightly better rendering performance,
|
||||
but not all checkpoint files can be converted. Note that Stable Diffusion
|
||||
Version 2.X files are **only** supported in `diffusers` format and will
|
||||
be converted regardless.
|
||||
|
||||
_You can come back to the model install form_ as many times as you like.
|
||||
From the `invoke.sh` or `invoke.bat` launcher, select option (5) to relaunch
|
||||
this script. On the command line, it is named `invokeai-model-install`.
|
||||
|
||||
12. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
|
||||
10. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
|
||||
for the directory `invokeai` installed in the location you chose at the
|
||||
beginning of the install session. Look for a shell script named `invoke.sh`
|
||||
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
|
||||
@ -349,14 +220,14 @@ experimental versions later.
|
||||
http://localhost:9090. Click on this link to open up a browser
|
||||
and start exploring InvokeAI's features.
|
||||
|
||||
12. **InvokeAI Options**: You can launch InvokeAI with several different command-line arguments that
|
||||
customize its behavior. For example, you can change the location of the
|
||||
12. **InvokeAI Options**: You can configure InvokeAI using the `invokeai.yaml` config file.
|
||||
For example, you can change the location of the
|
||||
image output directory or balance memory usage vs performance. See
|
||||
[Configuration](../features/CONFIGURATION.md) for a full list of the options.
|
||||
|
||||
- To set defaults that will take effect every time you launch InvokeAI,
|
||||
use a text editor (e.g. Notepad) to edit the file
|
||||
`invokeai\invokeai.init`. It contains a variety of examples that you can
|
||||
`invokeai\invokeai.yaml`. It contains a variety of examples that you can
|
||||
follow to add and modify launch options.
|
||||
|
||||
- The launcher script also offers you an option labeled "open the developer
|
||||
@ -394,7 +265,6 @@ rm .\.venv -r -force
|
||||
python -mvenv .venv
|
||||
.\.venv\Scripts\activate
|
||||
pip install invokeai
|
||||
invokeai-configure --yes --root .
|
||||
```
|
||||
|
||||
If you see anything marked as an error during this process please stop
|
||||
@ -426,16 +296,10 @@ error messages:
|
||||
This failure mode occurs when there is a network glitch during
|
||||
downloading the very large SDXL model.
|
||||
|
||||
To address this, first go to the Web Model Manager and delete the
|
||||
Stable-Diffusion-XL-base-1.X model. Then navigate to HuggingFace and
|
||||
manually download the .safetensors version of the model. The 1.0
|
||||
version is located at
|
||||
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main
|
||||
and the file is named `sd_xl_base_1.0.safetensors`.
|
||||
|
||||
Save this file to disk and then reenter the Model Manager. Navigate to
|
||||
Import Models->Add Model, then type (or drag-and-drop) the path to the
|
||||
.safetensors file. Press "Add Model".
|
||||
To address this, first go to the Model Manager and delete the
|
||||
Stable-Diffusion-XL-base-1.X model. Then, click the HuggingFace tab,
|
||||
paste the Repo ID stabilityai/stable-diffusion-xl-base-1.0 and install
|
||||
the model.
|
||||
|
||||
### _Package dependency conflicts_
|
||||
|
||||
@ -488,15 +352,7 @@ download models, etc), but this doesn't fix the problem.
|
||||
|
||||
This issue is often caused by a misconfigured configuration directive in the
|
||||
`invokeai\invokeai.init` initialization file that contains startup settings. The
|
||||
easiest way to fix the problem is to move the file out of the way and re-run
|
||||
`invokeai-configure`. Enter the developer's console (option 3 of the launcher
|
||||
script) and run this command:
|
||||
|
||||
```cmd
|
||||
invokeai-configure --root=.
|
||||
```
|
||||
|
||||
Note the dot (.) after `--root`. It is part of the command.
|
||||
easiest way to fix the problem is to move the file out of the way and restart the app.
|
||||
|
||||
_If none of these maneuvers fixes the problem_ then please report the problem to
|
||||
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
||||
@ -565,16 +421,4 @@ This distribution is changing rapidly, and we add new features
|
||||
regularly. Releases are announced at
|
||||
http://github.com/invoke-ai/InvokeAI/releases, and at
|
||||
https://pypi.org/project/InvokeAI/ To update to the latest released
|
||||
version (recommended), follow these steps:
|
||||
|
||||
1. Start the `invoke.sh`/`invoke.bat` launch script from within the
|
||||
`invokeai` root directory.
|
||||
|
||||
2. Choose menu item (10) "Update InvokeAI".
|
||||
|
||||
3. This will launch a menu that gives you the option of:
|
||||
|
||||
1. Updating to the latest official release;
|
||||
2. Updating to the bleeding-edge development version; or
|
||||
3. Manually entering the tag or branch name of a version of
|
||||
InvokeAI you wish to try out.
|
||||
version (recommended), download the latest release and run the installer.
|
||||
|
@ -26,7 +26,7 @@ driver).
|
||||
|
||||
🖥️ **Download the latest installer .zip file here** : https://github.com/invoke-ai/InvokeAI/releases/latest
|
||||
|
||||
- *Look for the file labelled "InvokeAI-installer-v3.X.X.zip" at the bottom of the page*
|
||||
- *Look for the file labelled "InvokeAI-installer-v4.X.X.zip" at the bottom of the page*
|
||||
- If you experience issues, read through the full [installation instructions](010_INSTALL_AUTOMATED.md) to make sure you have met all of the installation requirements. If you need more help, join the [Discord](discord.gg/invoke-ai) or create an issue on [Github](https://github.com/invoke-ai/InvokeAI).
|
||||
|
||||
|
||||
|
@ -22,6 +22,24 @@ class MyInvocation(BaseInvocation):
|
||||
...
|
||||
```
|
||||
|
||||
The full API is documented below.
|
||||
|
||||
## Invocation Mixins
|
||||
|
||||
Two important mixins are provided to facilitate working with metadata and gallery boards.
|
||||
|
||||
### `WithMetadata`
|
||||
|
||||
Inherit from this class (in addition to `BaseInvocation`) to add a `metadata` input to your node. When you do this, you can access the metadata dict from `self.metadata` in the `invoke()` function.
|
||||
|
||||
The dict will be populated via the node's input, and you can add any metadata you'd like to it. When you call `context.images.save()`, if the metadata dict has any data, it be automatically embedded in the image.
|
||||
|
||||
### `WithBoard`
|
||||
|
||||
Inherit from this class (in addition to `BaseInvocation`) to add a `board` input to your node. This renders as a drop-down to select a board. The user's selection will be accessible from `self.board` in the `invoke()` function.
|
||||
|
||||
When you call `context.images.save()`, if a board was selected, the image will be added to that board as it is saved.
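
As an illustration only, a node that opts into both mixins might look roughly like the sketch below. The node name, the `image` field, and the import paths are assumptions made for this sketch and may differ between versions; only `WithMetadata`, `WithBoard`, `self.metadata`, `self.board`, and `context.images.save()` come from the description above:

```py
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
from invokeai.app.invocations.fields import ImageField, InputField, WithBoard, WithMetadata
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext


@invocation("my_image_passthrough", title="My Image Passthrough", version="1.0.0")
class MyImagePassthroughInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Hypothetical node that re-saves its input image, with metadata and board support."""

    image: ImageField = InputField(description="The image to re-save")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # `self.metadata` and `self.board` are available because of the mixins.
        pil_image = context.images.get_pil(self.image.image_name)
        # If metadata was provided it is embedded in the saved image, and if a board
        # was selected the image is added to it - both handled by `images.save()`.
        image_dto = context.images.save(image=pil_image)
        return ImageOutput.build(image_dto)
```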
|
||||
|
||||
<!-- prettier-ignore-start -->
|
||||
::: invokeai.app.services.shared.invocation_context.InvocationContext
|
||||
options:
|
||||
|
@ -149,9 +149,6 @@ class Installer:
|
||||
# install the launch/update scripts into the runtime directory
|
||||
self.instance.install_user_scripts()
|
||||
|
||||
# run through the configuration flow
|
||||
self.instance.configure()
|
||||
|
||||
|
||||
class InvokeAiInstance:
|
||||
"""
|
||||
@ -242,53 +239,6 @@ class InvokeAiInstance:
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
def configure(self):
|
||||
"""
|
||||
Configure the InvokeAI runtime directory
|
||||
"""
|
||||
|
||||
auto_install = False
|
||||
# set sys.argv to a consistent state
|
||||
new_argv = [sys.argv[0]]
|
||||
for i in range(1, len(sys.argv)):
|
||||
el = sys.argv[i]
|
||||
if el in ["-r", "--root"]:
|
||||
new_argv.append(el)
|
||||
new_argv.append(sys.argv[i + 1])
|
||||
elif el in ["-y", "--yes", "--yes-to-all"]:
|
||||
auto_install = True
|
||||
sys.argv = new_argv
|
||||
|
||||
import messages
|
||||
import requests # to catch download exceptions
|
||||
|
||||
auto_install = auto_install or messages.user_wants_auto_configuration()
|
||||
if auto_install:
|
||||
sys.argv.append("--yes")
|
||||
else:
|
||||
messages.introduction()
|
||||
|
||||
from invokeai.frontend.install.invokeai_configure import invokeai_configure
|
||||
|
||||
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
|
||||
# from the installer will also automatically propagate down to the config script.
|
||||
# this may change in the future with config refactoring!
|
||||
succeeded = False
|
||||
try:
|
||||
invokeai_configure()
|
||||
succeeded = True
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
print(f"\nA network error was encountered during configuration and download: {str(e)}")
|
||||
except OSError as e:
|
||||
print(f"\nAn OS error was encountered during configuration and download: {str(e)}")
|
||||
except Exception as e:
|
||||
print(f"\nA problem was encountered during the configuration and download steps: {str(e)}")
|
||||
finally:
|
||||
if not succeeded:
|
||||
print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
|
||||
print("and choose option 7 to fix a broken install, optionally followed by option 5 to install models.")
|
||||
print("Alternatively you can relaunch the installer.")
|
||||
|
||||
def install_user_scripts(self):
|
||||
"""
|
||||
Copy the launch and update scripts to the runtime dir
|
||||
|
@ -8,7 +8,7 @@ import platform
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from prompt_toolkit import HTML, prompt
|
||||
from prompt_toolkit import prompt
|
||||
from prompt_toolkit.completion import FuzzyWordCompleter, PathCompleter
|
||||
from prompt_toolkit.validation import Validator
|
||||
from rich import box, print
|
||||
@ -98,39 +98,6 @@ def choose_version(available_releases: tuple | None = None) -> str:
|
||||
return "stable" if response == "" else response
|
||||
|
||||
|
||||
def user_wants_auto_configuration() -> bool:
|
||||
"""Prompt the user to choose between manual and auto configuration."""
|
||||
console.rule("InvokeAI Configuration Section")
|
||||
console.print(
|
||||
Panel(
|
||||
Group(
|
||||
"\n".join(
|
||||
[
|
||||
"Libraries are installed and InvokeAI will now set up its root directory and configuration. Choose between:",
|
||||
"",
|
||||
" * AUTOMATIC configuration: install reasonable defaults and a minimal set of starter models.",
|
||||
" * MANUAL configuration: manually inspect and adjust configuration options and pick from a larger set of starter models.",
|
||||
"",
|
||||
"Later you can fine tune your configuration by selecting option [6] 'Change InvokeAI startup options' from the invoke.bat/invoke.sh launcher script.",
|
||||
]
|
||||
),
|
||||
),
|
||||
box=box.MINIMAL,
|
||||
padding=(1, 1),
|
||||
)
|
||||
)
|
||||
choice = (
|
||||
prompt(
|
||||
HTML("Choose <b><a></b>utomatic or <b><m></b>anual configuration [a/m] (a): "),
|
||||
validator=Validator.from_callable(
|
||||
lambda n: n == "" or n.startswith(("a", "A", "m", "M")), error_message="Please select 'a' or 'm'"
|
||||
),
|
||||
)
|
||||
or "a"
|
||||
)
|
||||
return choice.lower().startswith("a")
|
||||
|
||||
|
||||
def confirm_install(dest: Path) -> bool:
|
||||
if dest.exists():
|
||||
print(f":stop_sign: Directory {dest} already exists!")
|
||||
@ -351,34 +318,6 @@ def windows_long_paths_registry() -> None:
|
||||
)
|
||||
|
||||
|
||||
def introduction() -> None:
|
||||
"""
|
||||
Display a banner when starting configuration of the InvokeAI application
|
||||
"""
|
||||
|
||||
console.rule()
|
||||
|
||||
console.print(
|
||||
Panel(
|
||||
title=":art: Configuring InvokeAI :art:",
|
||||
renderable=Group(
|
||||
"",
|
||||
"[b]This script will:",
|
||||
"",
|
||||
"1. Configure the InvokeAI application directory",
|
||||
"2. Help download the Stable Diffusion weight files",
|
||||
" and other large models that are needed for text to image generation",
|
||||
"3. Create initial configuration files.",
|
||||
"",
|
||||
"[i]At any point you may interrupt this program and resume later.",
|
||||
"",
|
||||
"[b]For the best user experience, please enlarge or maximize this window",
|
||||
),
|
||||
)
|
||||
)
|
||||
console.line(2)
|
||||
|
||||
|
||||
def _platform_specific_help() -> Text | None:
|
||||
if OS == "Darwin":
|
||||
text = Text.from_markup(
|
||||
|
@ -9,15 +9,10 @@ set INVOKEAI_ROOT=.
|
||||
:start
|
||||
echo Desired action:
|
||||
echo 1. Generate images with the browser-based interface
|
||||
echo 2. Run textual inversion training
|
||||
echo 3. Merge models (diffusers type only)
|
||||
echo 4. Download and install models
|
||||
echo 5. Change InvokeAI startup options
|
||||
echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade
|
||||
echo 7. Open the developer console
|
||||
echo 8. Update InvokeAI (DEPRECATED - please use the installer)
|
||||
echo 9. Run the InvokeAI image database maintenance script
|
||||
echo 10. Command-line help
|
||||
echo 2. Open the developer console
|
||||
echo 3. Update InvokeAI (DEPRECATED - please use the installer)
|
||||
echo 4. Run the InvokeAI image database maintenance script
|
||||
echo 5. Command-line help
|
||||
echo Q - Quit
|
||||
set /P choice="Please enter 1-10, Q: [1] "
|
||||
if not defined choice set choice=1
|
||||
@ -25,21 +20,6 @@ IF /I "%choice%" == "1" (
|
||||
echo Starting the InvokeAI browser-based UI..
|
||||
python .venv\Scripts\invokeai-web.exe %*
|
||||
) ELSE IF /I "%choice%" == "2" (
|
||||
echo Starting textual inversion training..
|
||||
python .venv\Scripts\invokeai-ti.exe --gui
|
||||
) ELSE IF /I "%choice%" == "3" (
|
||||
echo Starting model merging script..
|
||||
python .venv\Scripts\invokeai-merge.exe --gui
|
||||
) ELSE IF /I "%choice%" == "4" (
|
||||
echo Running invokeai-model-install...
|
||||
python .venv\Scripts\invokeai-model-install.exe
|
||||
) ELSE IF /I "%choice%" == "5" (
|
||||
echo Running invokeai-configure...
|
||||
python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
|
||||
) ELSE IF /I "%choice%" == "6" (
|
||||
echo Running invokeai-configure...
|
||||
python .venv\Scripts\invokeai-configure.exe --yes --skip-sd-weight
|
||||
) ELSE IF /I "%choice%" == "7" (
|
||||
echo Developer Console
|
||||
echo Python command is:
|
||||
where python
|
||||
@ -51,15 +31,15 @@ IF /I "%choice%" == "1" (
|
||||
echo *************************
|
||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
||||
call cmd /k
|
||||
) ELSE IF /I "%choice%" == "8" (
|
||||
) ELSE IF /I "%choice%" == "3" (
|
||||
echo UPDATING FROM WITHIN THE APP IS BEING DEPRECATED.
|
||||
echo Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.
|
||||
timeout 4
|
||||
python -m invokeai.frontend.install.invokeai_update
|
||||
) ELSE IF /I "%choice%" == "9" (
|
||||
) ELSE IF /I "%choice%" == "4" (
|
||||
echo Running the db maintenance script...
|
||||
python .venv\Scripts\invokeai-db-maintenance.exe
|
||||
) ELSE IF /I "%choice%" == "10" (
|
||||
) ELSE IF /I "%choice%" == "5" (
|
||||
echo Displaying command line help...
|
||||
python .venv\Scripts\invokeai-web.exe --help %*
|
||||
pause
|
||||
|
@ -58,49 +58,24 @@ do_choice() {
|
||||
invokeai-web $PARAMS
|
||||
;;
|
||||
2)
|
||||
clear
|
||||
printf "Textual inversion training\n"
|
||||
invokeai-ti --gui $PARAMS
|
||||
;;
|
||||
3)
|
||||
clear
|
||||
printf "Merge models (diffusers type only)\n"
|
||||
invokeai-merge --gui $PARAMS
|
||||
;;
|
||||
4)
|
||||
clear
|
||||
printf "Download and install models\n"
|
||||
invokeai-model-install --root ${INVOKEAI_ROOT}
|
||||
;;
|
||||
5)
|
||||
clear
|
||||
printf "Change InvokeAI startup options\n"
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
|
||||
;;
|
||||
6)
|
||||
clear
|
||||
printf "Re-run the configure script to fix a broken install or to complete a major upgrade\n"
|
||||
invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only --skip-sd-weights
|
||||
;;
|
||||
7)
|
||||
clear
|
||||
printf "Open the developer console\n"
|
||||
file_name=$(basename "${BASH_SOURCE[0]}")
|
||||
bash --init-file "$file_name"
|
||||
;;
|
||||
8)
|
||||
3)
|
||||
clear
|
||||
printf "UPDATING FROM WITHIN THE APP IS BEING DEPRECATED\n"
|
||||
printf "Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.\n"
|
||||
sleep 4
|
||||
python -m invokeai.frontend.install.invokeai_update
|
||||
;;
|
||||
9)
|
||||
4)
|
||||
clear
|
||||
printf "Running the db maintenance script\n"
|
||||
invokeai-db-maintenance --root ${INVOKEAI_ROOT}
|
||||
;;
|
||||
10)
|
||||
5)
|
||||
clear
|
||||
printf "Command-line help\n"
|
||||
invokeai-web --help
|
||||
@ -118,15 +93,10 @@ do_choice() {
|
||||
do_dialog() {
|
||||
options=(
|
||||
1 "Generate images with a browser-based interface"
|
||||
2 "Textual inversion training"
|
||||
3 "Merge models (diffusers type only)"
|
||||
4 "Download and install models"
|
||||
5 "Change InvokeAI startup options"
|
||||
6 "Re-run the configure script to fix a broken install or to complete a major upgrade"
|
||||
7 "Open the developer console"
|
||||
8 "Update InvokeAI (DEPRECATED - please use the installer)"
|
||||
9 "Run the InvokeAI image database maintenance script"
|
||||
10 "Command-line help"
|
||||
2 "Open the developer console"
|
||||
3 "Update InvokeAI (DEPRECATED - please use the installer)"
|
||||
4 "Run the InvokeAI image database maintenance script"
|
||||
5 "Command-line help"
|
||||
)
|
||||
|
||||
choice=$(dialog --clear \
|
||||
@ -151,15 +121,10 @@ do_line_input() {
|
||||
printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. **\n\n"
|
||||
printf "What would you like to do?\n"
|
||||
printf "1: Generate images using the browser-based interface\n"
|
||||
printf "2: Run textual inversion training\n"
|
||||
printf "3: Merge models (diffusers type only)\n"
|
||||
printf "4: Download and install models\n"
|
||||
printf "5: Change InvokeAI startup options\n"
|
||||
printf "6: Re-run the configure script to fix a broken install\n"
|
||||
printf "7: Open the developer console\n"
|
||||
printf "8: Update InvokeAI\n"
|
||||
printf "9: Run the InvokeAI image database maintenance script\n"
|
||||
printf "10: Command-line help\n"
|
||||
printf "2: Open the developer console\n"
|
||||
printf "3: Update InvokeAI\n"
|
||||
printf "4: Run the InvokeAI image database maintenance script\n"
|
||||
printf "5: Command-line help\n"
|
||||
printf "Q: Quit\n\n"
|
||||
read -p "Please enter 1-10, Q: [1] " yn
|
||||
choice=${yn:='1'}
|
||||
|
@ -1,11 +0,0 @@
|
||||
Organization of the source tree:
|
||||
|
||||
app -- Home of nodes invocations and services
|
||||
assets -- Images and other data files used by InvokeAI
|
||||
backend -- Non-user facing libraries, including the rendering
|
||||
core.
|
||||
configs -- Configuration files used at install and run times
|
||||
frontend -- User-facing scripts, including the CLI and the WebUI
|
||||
version -- Current InvokeAI version string, stored
|
||||
in version/invokeai_version.py
|
||||
|
@ -25,6 +25,7 @@ from ..services.invocation_cache.invocation_cache_memory import MemoryInvocation
|
||||
from ..services.invocation_services import InvocationServices
|
||||
from ..services.invocation_stats.invocation_stats_default import InvocationStatsService
|
||||
from ..services.invoker import Invoker
|
||||
from ..services.model_images.model_images_default import ModelImageFileStorageDisk
|
||||
from ..services.model_manager.model_manager_default import ModelManagerService
|
||||
from ..services.model_records import ModelRecordServiceSQL
|
||||
from ..services.names.names_default import SimpleNameService
|
||||
@ -63,14 +64,15 @@ class ApiDependencies:
|
||||
def initialize(config: InvokeAIAppConfig, event_handler_id: int, logger: Logger = logger) -> None:
|
||||
logger.info(f"InvokeAI version {__version__}")
|
||||
logger.info(f"Root directory = {str(config.root_path)}")
|
||||
logger.debug(f"Internet connectivity is {config.internet_available}")
|
||||
|
||||
output_folder = config.output_path
|
||||
output_folder = config.outputs_path
|
||||
if output_folder is None:
|
||||
raise ValueError("Output folder is not set")
|
||||
|
||||
image_files = DiskImageFileStorage(f"{output_folder}/images")
|
||||
|
||||
model_images_folder = config.models_path
|
||||
|
||||
db = init_db(config=config, logger=logger, image_files=image_files)
|
||||
|
||||
configuration = config
|
||||
@ -92,6 +94,7 @@ class ApiDependencies:
|
||||
ObjectSerializerDisk[ConditioningFieldData](output_folder / "conditioning", ephemeral=True)
|
||||
)
|
||||
download_queue_service = DownloadQueueService(event_bus=events)
|
||||
model_images_service = ModelImageFileStorageDisk(model_images_folder / "model_images")
|
||||
model_manager = ModelManagerService.build_model_manager(
|
||||
app_config=configuration,
|
||||
model_record_service=ModelRecordServiceSQL(db=db),
|
||||
@ -118,6 +121,7 @@ class ApiDependencies:
|
||||
images=images,
|
||||
invocation_cache=invocation_cache,
|
||||
logger=logger,
|
||||
model_images=model_images_service,
|
||||
model_manager=model_manager,
|
||||
download_queue=download_queue_service,
|
||||
names=names,
|
||||
|
@@ -12,7 +12,6 @@ from pydantic import BaseModel, Field

from invokeai.app.invocations.upscale import ESRGAN_MODELS
from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
from invokeai.backend.image_util.patchmatch import PatchMatch
from invokeai.backend.image_util.safety_checker import SafetyChecker
from invokeai.backend.util.logging import logging
@@ -114,9 +113,7 @@ async def get_config() -> AppConfig:
    if SafetyChecker.safety_checker_available():
        nsfw_methods.append("nsfw_checker")

    watermarking_methods = []
    if InvisibleWatermark.invisible_watermark_available():
        watermarking_methods.append("invisible_watermark")
    watermarking_methods = ["invisible_watermark"]

    return AppConfig(
        infill_methods=infill_methods,
@@ -1,13 +1,21 @@
# Copyright (c) 2023 Lincoln D. Stein
"""FastAPI route for model configuration records."""

import contextlib
import io
import pathlib
import shutil
import traceback
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, List, Optional

from fastapi import Body, Path, Query, Response
import huggingface_hub
from fastapi import Body, Path, Query, Response, UploadFile
from fastapi.responses import FileResponse
from fastapi.routing import APIRouter
from pydantic import BaseModel, ConfigDict, Field
from PIL import Image
from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
from starlette.exceptions import HTTPException
from typing_extensions import Annotated

@@ -17,6 +25,7 @@ from invokeai.app.services.model_records import (
    UnknownModelException,
)
from invokeai.app.services.model_records.model_records_base import DuplicateModelException, ModelRecordChanges
from invokeai.app.util.suppress_output import SuppressOutput
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,
@@ -25,12 +34,18 @@ from invokeai.backend.model_manager.config import (
    ModelType,
    SubModelType,
)
from invokeai.backend.model_manager.metadata.fetch.huggingface import HuggingFaceMetadataFetch
from invokeai.backend.model_manager.metadata.metadata_base import ModelMetadataWithFiles, UnknownMetadataException
from invokeai.backend.model_manager.search import ModelSearch
from invokeai.backend.model_manager.starter_models import STARTER_MODELS, StarterModel

from ..dependencies import ApiDependencies

model_manager_router = APIRouter(prefix="/v2/models", tags=["model_manager"])

# images are immutable; set a high max-age
IMAGE_MAX_AGE = 31536000


class ModelsList(BaseModel):
    """Return list of configs."""
@@ -105,6 +120,9 @@ async def list_model_records(
        found_models.extend(
            record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
        )
    for model in found_models:
        cover_image = ApiDependencies.invoker.services.model_images.get_url(model.key)
        model.cover_image = cover_image
    return ModelsList(models=found_models)


@@ -148,6 +166,8 @@ async def get_model_record(
    record_store = ApiDependencies.invoker.services.model_manager.store
    try:
        config: AnyModelConfig = record_store.get_model(key)
        cover_image = ApiDependencies.invoker.services.model_images.get_url(key)
        config.cover_image = cover_image
        return config
    except UnknownModelException as e:
        raise HTTPException(status_code=404, detail=str(e))
@@ -234,6 +254,40 @@ async def scan_for_models(
    return scan_results


class HuggingFaceModels(BaseModel):
    urls: List[AnyHttpUrl] | None = Field(description="URLs for all checkpoint format models in the metadata")
    is_diffusers: bool = Field(description="Whether the metadata is for a Diffusers format model")


@model_manager_router.get(
    "/hugging_face",
    operation_id="get_hugging_face_models",
    responses={
        200: {"description": "Hugging Face repo scanned successfully"},
        400: {"description": "Invalid hugging face repo"},
    },
    status_code=200,
    response_model=HuggingFaceModels,
)
async def get_hugging_face_models(
    hugging_face_repo: str = Query(description="Hugging face repo to search for models", default=None),
) -> HuggingFaceModels:
    try:
        metadata = HuggingFaceMetadataFetch().from_id(hugging_face_repo)
    except UnknownMetadataException:
        raise HTTPException(
            status_code=400,
            detail="No HuggingFace repository found",
        )

    assert isinstance(metadata, ModelMetadataWithFiles)

    return HuggingFaceModels(
        urls=metadata.ckpt_urls,
        is_diffusers=metadata.is_diffusers,
    )

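A minimal client-side sketch of the repo-scan endpoint added above. The host, port, and `/api` prefix are assumptions about a locally running InvokeAI server; only the router path (`/v2/models/hugging_face`) and the response fields (`urls`, `is_diffusers`) come from the diff, and the repo id is just an example.

```python
# Sketch: ask a local InvokeAI server whether a Hugging Face repo is a diffusers
# model or a set of checkpoint files. Base URL and /api prefix are assumptions.
import requests

resp = requests.get(
    "http://127.0.0.1:9090/api/v2/models/hugging_face",
    params={"hugging_face_repo": "runwayml/stable-diffusion-v1-5"},
    timeout=30,
)
resp.raise_for_status()
payload = resp.json()
if payload["is_diffusers"]:
    print("Diffusers-format repo; install it by repo id.")
else:
    print("Checkpoint URLs:", payload["urls"])
```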
@model_manager_router.patch(
    "/i/{key}",
    operation_id="update_model_record",
@@ -266,6 +320,75 @@ async def update_model_record(
    return model_response


@model_manager_router.get(
    "/i/{key}/image",
    operation_id="get_model_image",
    responses={
        200: {
            "description": "The model image was fetched successfully",
        },
        400: {"description": "Bad request"},
        404: {"description": "The model image could not be found"},
    },
    status_code=200,
)
async def get_model_image(
    key: str = Path(description="The name of model image file to get"),
) -> FileResponse:
    """Gets an image file that previews the model"""

    try:
        path = ApiDependencies.invoker.services.model_images.get_path(key)

        response = FileResponse(
            path,
            media_type="image/png",
            filename=key + ".png",
            content_disposition_type="inline",
        )
        response.headers["Cache-Control"] = f"max-age={IMAGE_MAX_AGE}"
        return response
    except Exception:
        raise HTTPException(status_code=404)

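A client-side sketch of fetching the preview image served by the new route. The host, port, and `/api` prefix are assumptions about a local server, and the model key is a hypothetical placeholder; the route path itself comes from the diff.

```python
# Sketch: download a model's preview image from a local InvokeAI server.
import requests

key = "some-model-key"  # hypothetical key taken from the model records listing
resp = requests.get(f"http://127.0.0.1:9090/api/v2/models/i/{key}/image", timeout=30)
if resp.status_code == 200:
    with open(f"{key}.png", "wb") as f:
        f.write(resp.content)
else:
    print("No preview image for this model:", resp.status_code)
```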
@model_manager_router.patch(
    "/i/{key}/image",
    operation_id="update_model_image",
    responses={
        200: {
            "description": "The model image was updated successfully",
        },
        400: {"description": "Bad request"},
    },
    status_code=200,
)
async def update_model_image(
    key: Annotated[str, Path(description="Unique key of model")],
    image: UploadFile,
) -> None:
    if not image.content_type or not image.content_type.startswith("image"):
        raise HTTPException(status_code=415, detail="Not an image")

    contents = await image.read()
    try:
        pil_image = Image.open(io.BytesIO(contents))

    except Exception:
        ApiDependencies.invoker.services.logger.error(traceback.format_exc())
        raise HTTPException(status_code=415, detail="Failed to read image")

    logger = ApiDependencies.invoker.services.logger
    model_images = ApiDependencies.invoker.services.model_images
    try:
        model_images.save(pil_image, key)
        logger.info(f"Updated image for model: {key}")
    except ValueError as e:
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))
    return

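Because the route takes an `UploadFile` parameter named `image`, FastAPI expects a multipart form field of the same name. A hedged sketch of uploading a preview image from a client; host, port, `/api` prefix, key, and filename are placeholders.

```python
# Sketch: set a model's preview image via the PATCH route above.
import requests

key = "some-model-key"  # hypothetical key
with open("preview.png", "rb") as f:
    resp = requests.patch(
        f"http://127.0.0.1:9090/api/v2/models/i/{key}/image",
        files={"image": ("preview.png", f, "image/png")},
        timeout=30,
    )
resp.raise_for_status()  # 415 if the payload is not an image, 409 if the save fails
```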
@model_manager_router.delete(
    "/i/{key}",
    operation_id="delete_model",
@@ -296,6 +419,29 @@ async def delete_model(
        raise HTTPException(status_code=404, detail=str(e))


@model_manager_router.delete(
    "/i/{key}/image",
    operation_id="delete_model_image",
    responses={
        204: {"description": "Model image deleted successfully"},
        404: {"description": "Model image not found"},
    },
    status_code=204,
)
async def delete_model_image(
    key: str = Path(description="Unique key of model image to remove from model_images directory."),
) -> None:
    logger = ApiDependencies.invoker.services.logger
    model_images = ApiDependencies.invoker.services.model_images
    try:
        model_images.delete(key)
        logger.info(f"Deleted model image: {key}")
        return
    except UnknownModelException as e:
        logger.error(str(e))
        raise HTTPException(status_code=404, detail=str(e))

# @model_manager_router.post(
#     "/i/",
#     operation_id="add_model_record",
@@ -539,7 +685,7 @@ async def convert_model(
        raise HTTPException(400, f"The model with key {key} is not a main checkpoint model.")

    # loading the model will convert it into a cached diffusers file
    model_manager.load_model_by_config(model_config, submodel_type=SubModelType.Scheduler)
    model_manager.load.load_model(model_config, submodel_type=SubModelType.Scheduler)

    # Get the path of the converted model from the loader
    cache_path = loader.convert_cache.cache_path(key)
@@ -640,3 +786,69 @@ async def convert_model(
# except ValueError as e:
#     raise HTTPException(status_code=400, detail=str(e))
# return response

@model_manager_router.get("/starter_models", operation_id="get_starter_models", response_model=list[StarterModel])
async def get_starter_models() -> list[StarterModel]:
    installed_models = ApiDependencies.invoker.services.model_manager.store.search_by_attr()
    installed_model_sources = {m.source for m in installed_models}
    starter_models = deepcopy(STARTER_MODELS)
    for model in starter_models:
        if model.source in installed_model_sources:
            model.is_installed = True
        # Remove already-installed dependencies
        missing_deps: list[str] = []
        for dep in model.dependencies or []:
            if dep not in installed_model_sources:
                missing_deps.append(dep)
        model.dependencies = missing_deps

    return starter_models

class HFTokenStatus(str, Enum):
    VALID = "valid"
    INVALID = "invalid"
    UNKNOWN = "unknown"


class HFTokenHelper:
    @classmethod
    def get_status(cls) -> HFTokenStatus:
        try:
            if huggingface_hub.get_token_permission(huggingface_hub.get_token()):
                # Valid token!
                return HFTokenStatus.VALID
            # No token set
            return HFTokenStatus.INVALID
        except Exception:
            return HFTokenStatus.UNKNOWN

    @classmethod
    def set_token(cls, token: str) -> HFTokenStatus:
        with SuppressOutput(), contextlib.suppress(Exception):
            huggingface_hub.login(token=token, add_to_git_credential=False)
        return cls.get_status()

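A standalone sketch of what the helper above does, runnable outside the server; it uses only the `huggingface_hub` calls that appear in the diff (`login`, `get_token`, `get_token_permission`). The token value is a placeholder.

```python
# Sketch: save a Hugging Face token locally, then report whether it is usable.
import contextlib

import huggingface_hub


def set_and_check_token(token: str) -> str:
    with contextlib.suppress(Exception):
        huggingface_hub.login(token=token, add_to_git_credential=False)
    try:
        if huggingface_hub.get_token_permission(huggingface_hub.get_token()):
            return "valid"
        return "invalid"  # no token saved, or the Hub rejected it
    except Exception:
        return "unknown"  # e.g. no network, cannot verify


if __name__ == "__main__":
    print(set_and_check_token("hf_xxx"))  # placeholder token
```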
@model_manager_router.get("/hf_login", operation_id="get_hf_login_status", response_model=HFTokenStatus)
async def get_hf_login_status() -> HFTokenStatus:
    token_status = HFTokenHelper.get_status()

    if token_status is HFTokenStatus.UNKNOWN:
        ApiDependencies.invoker.services.logger.warning("Unable to verify HF token")

    return token_status


@model_manager_router.post("/hf_login", operation_id="do_hf_login", response_model=HFTokenStatus)
async def do_hf_login(
    token: str = Body(description="Hugging Face token to use for login", embed=True),
) -> HFTokenStatus:
    HFTokenHelper.set_token(token)
    token_status = HFTokenHelper.get_status()

    if token_status is HFTokenStatus.UNKNOWN:
        ApiDependencies.invoker.services.logger.warning("Unable to verify HF token")

    return token_status
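Because the token parameter uses `Body(..., embed=True)`, the POST route expects a JSON object with a `token` field. A hedged client sketch; host, port, `/api` prefix, and the token are placeholders.

```python
# Sketch: drive the new /hf_login routes from a client.
import requests

BASE = "http://127.0.0.1:9090/api/v2/models"

print(requests.get(f"{BASE}/hf_login", timeout=30).json())  # "valid" | "invalid" | "unknown"
resp = requests.post(f"{BASE}/hf_login", json={"token": "hf_xxx"}, timeout=30)
print(resp.json())  # status after attempting the login
```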
@ -1,71 +1,62 @@
|
||||
# parse_args() must be called before any other imports. if it is not called first, consumers of the config
|
||||
# which are imported/used before parse_args() is called will get the default config values instead of the
|
||||
# values from the command line or config file.
|
||||
import asyncio
|
||||
import mimetypes
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
from contextlib import asynccontextmanager
|
||||
from inspect import signature
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import uvicorn
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.middleware.gzip import GZipMiddleware
|
||||
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
|
||||
from fastapi.openapi.utils import get_openapi
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
||||
from pydantic.json_schema import models_json_schema
|
||||
from torch.backends.mps import is_available as is_mps_available
|
||||
|
||||
# for PyCharm:
|
||||
# noinspection PyUnresolvedReferences
|
||||
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
|
||||
import invokeai.frontend.web as web_dir
|
||||
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
|
||||
from invokeai.version.invokeai_version import __version__
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
|
||||
|
||||
from ..backend.util.logging import InvokeAILogger
|
||||
from .api.dependencies import ApiDependencies
|
||||
from .api.routers import (
|
||||
app_info,
|
||||
board_images,
|
||||
boards,
|
||||
download_queue,
|
||||
images,
|
||||
model_manager,
|
||||
session_queue,
|
||||
utilities,
|
||||
workflows,
|
||||
)
|
||||
from .api.sockets import SocketIO
|
||||
from .invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
UIConfigBase,
|
||||
)
|
||||
from .invocations.fields import InputFieldJSONSchemaExtra, OutputFieldJSONSchemaExtra
|
||||
from .services.config import InvokeAIAppConfig
|
||||
|
||||
app_config = InvokeAIAppConfig.get_config()
|
||||
app_config.parse_args()
|
||||
if app_config.version:
|
||||
print(f"InvokeAI version {__version__}")
|
||||
sys.exit(0)
|
||||
|
||||
if True: # hack to make flake8 happy with imports coming after setting up the config
|
||||
import asyncio
|
||||
import mimetypes
|
||||
import socket
|
||||
from inspect import signature
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import uvicorn
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.middleware.gzip import GZipMiddleware
|
||||
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
|
||||
from fastapi.openapi.utils import get_openapi
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.middleware import EventHandlerASGIMiddleware
|
||||
from pydantic.json_schema import models_json_schema
|
||||
from torch.backends.mps import is_available as is_mps_available
|
||||
|
||||
# for PyCharm:
|
||||
# noinspection PyUnresolvedReferences
|
||||
import invokeai.backend.util.hotfixes # noqa: F401 (monkeypatching on import)
|
||||
import invokeai.frontend.web as web_dir
|
||||
|
||||
from ..backend.util.logging import InvokeAILogger
|
||||
from .api.dependencies import ApiDependencies
|
||||
from .api.routers import (
|
||||
app_info,
|
||||
board_images,
|
||||
boards,
|
||||
download_queue,
|
||||
images,
|
||||
model_manager,
|
||||
session_queue,
|
||||
utilities,
|
||||
workflows,
|
||||
)
|
||||
from .api.sockets import SocketIO
|
||||
from .invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
UIConfigBase,
|
||||
)
|
||||
|
||||
if is_mps_available():
|
||||
import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import)
|
||||
app_config = get_config()
|
||||
|
||||
|
||||
if is_mps_available():
|
||||
import invokeai.backend.util.mps_fixes # noqa: F401 (monkeypatching on import)
|
||||
|
||||
|
||||
app_config = InvokeAIAppConfig.get_config()
|
||||
app_config.parse_args()
|
||||
logger = InvokeAILogger.get_logger(config=app_config)
|
||||
# fix for windows mimetypes registry entries being borked
|
||||
# see https://github.com/invoke-ai/InvokeAI/discussions/3684#discussioncomment-6391352
|
||||
@ -156,17 +147,19 @@ def custom_openapi() -> dict[str, Any]:
|
||||
openapi_schema["components"]["schemas"][schema_key] = output_schema
|
||||
openapi_schema["components"]["schemas"][schema_key]["class"] = "output"
|
||||
|
||||
# Add Node Editor UI helper schemas
|
||||
ui_config_schemas = models_json_schema(
|
||||
# Some models don't end up in the schemas as standalone definitions
|
||||
additional_schemas = models_json_schema(
|
||||
[
|
||||
(UIConfigBase, "serialization"),
|
||||
(InputFieldJSONSchemaExtra, "serialization"),
|
||||
(OutputFieldJSONSchemaExtra, "serialization"),
|
||||
(ModelIdentifierField, "serialization"),
|
||||
(ProgressImage, "serialization"),
|
||||
],
|
||||
ref_template="#/components/schemas/{model}",
|
||||
)
|
||||
for schema_key, ui_config_schema in ui_config_schemas[1]["$defs"].items():
|
||||
openapi_schema["components"]["schemas"][schema_key] = ui_config_schema
|
||||
for schema_key, schema_json in additional_schemas[1]["$defs"].items():
|
||||
openapi_schema["components"]["schemas"][schema_key] = schema_json
|
||||
|
||||
# Add a reference to the output type to additionalProperties of the invoker schema
|
||||
for invoker in all_invocations:
|
||||
@@ -233,6 +226,27 @@ app.mount(


def invoke_api() -> None:
    class InterruptWatcher:
        def __init__(self):
            self.child = os.fork()
            if self.child == 0:
                return
            else:
                self.watch()

        def watch(self) -> None:
            try:
                os.wait()
            except KeyboardInterrupt:
                self.kill()
                sys.exit()

        def kill(self) -> None:
            try:
                os.kill(self.child, signal.SIGKILL)
            except OSError:
                pass

    def find_port(port: int) -> int:
        """Find a port not in use starting at given port"""
        # Taken from https://waylonwalker.com/python-find-available-port/, thanks Waylon!
@@ -243,9 +257,7 @@ def invoke_api() -> None:
        else:
            return port

    from invokeai.backend.install.check_root import check_invokeai_root

    check_invokeai_root(app_config)  # note, may exit with an exception if root not set up
    InterruptWatcher()

    if app_config.dev_reload:
        try:
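The `InterruptWatcher` added above forks the process: the child continues into the server, while the parent blocks in `os.wait()` and hard-kills the child on Ctrl-C. A minimal standalone sketch of that pattern under the assumption of a POSIX system (`os.fork` is unavailable on Windows); names are illustrative, not InvokeAI's.

```python
# Sketch: run a worker in a forked child and kill it outright on KeyboardInterrupt.
import os
import signal
import sys
import time


def run_with_interrupt_watcher(worker) -> None:
    child = os.fork()
    if child == 0:
        worker()       # child process: do the real work (e.g. serve HTTP)
        os._exit(0)
    try:
        os.wait()      # parent process: block until the child exits
    except KeyboardInterrupt:
        try:
            os.kill(child, signal.SIGKILL)  # Ctrl-C: terminate the child immediately
        except OSError:
            pass
        sys.exit(1)


if __name__ == "__main__":
    run_with_interrupt_watcher(lambda: time.sleep(60))
```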
@@ -3,9 +3,9 @@ import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path

from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.services.config.config_default import get_config

custom_nodes_path = Path(InvokeAIAppConfig.get_config().custom_nodes_path.resolve())
custom_nodes_path = Path(get_config().custom_nodes_path)
custom_nodes_path.mkdir(parents=True, exist_ok=True)

custom_nodes_init_path = str(custom_nodes_path / "__init__.py")
@@ -33,7 +33,7 @@ from invokeai.app.invocations.fields import (
    FieldKind,
    Input,
)
from invokeai.app.services.config.config_default import InvokeAIAppConfig
from invokeai.app.services.config.config_default import get_config
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.metaenum import MetaEnum
from invokeai.app.util.misc import uuid_string
@@ -191,7 +191,7 @@ class BaseInvocation(ABC, BaseModel):
    @classmethod
    def get_invocations(cls) -> Iterable[BaseInvocation]:
        """Gets all invocations, respecting the allowlist and denylist."""
        app_config = InvokeAIAppConfig.get_config()
        app_config = get_config()
        allowed_invocations: set[BaseInvocation] = set()
        for sc in cls._invocation_classes:
            invocation_type = sc.get_type()
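The recurring change in these hunks swaps the `InvokeAIAppConfig.get_config()` classmethod for a module-level `get_config()` accessor. A minimal sketch of that accessor pattern, assuming an `lru_cache`-style cached singleton; the real accessor in `config_default.py` may be implemented differently.

```python
# Sketch: a module-level, cached config accessor (illustrative, not InvokeAI's code).
from functools import lru_cache
from typing import List, Optional

from pydantic import BaseModel


class AppConfig(BaseModel):
    custom_nodes_path: str = "nodes"
    allow_nodes: Optional[List[str]] = None
    deny_nodes: Optional[List[str]] = None


@lru_cache(maxsize=1)
def get_config() -> AppConfig:
    # A real implementation would merge defaults, the config file and CLI args here.
    return AppConfig()


assert get_config() is get_config()  # every caller shares one cached instance
```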
@@ -20,7 +20,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
from invokeai.backend.util.devices import torch_dtype

from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from .model import ClipField
from .model import CLIPField

# unconditioned: Optional[torch.Tensor]

@@ -36,7 +36,7 @@ from .model import ClipField
    title="Prompt",
    tags=["prompt", "compel"],
    category="conditioning",
    version="1.0.1",
    version="1.1.1",
)
class CompelInvocation(BaseInvocation):
    """Parse prompt using compel package to conditioning."""
@@ -46,7 +46,7 @@ class CompelInvocation(BaseInvocation):
        description=FieldDescriptions.compel_prompt,
        ui_component=UIComponent.Textarea,
    )
    clip: ClipField = InputField(
    clip: CLIPField = InputField(
        title="CLIP",
        description=FieldDescriptions.clip,
        input=Input.Connection,
@@ -54,16 +54,16 @@ class CompelInvocation(BaseInvocation):

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ConditioningOutput:
        tokenizer_info = context.models.load(**self.clip.tokenizer.model_dump())
        tokenizer_info = context.models.load(self.clip.tokenizer)
        tokenizer_model = tokenizer_info.model
        assert isinstance(tokenizer_model, CLIPTokenizer)
        text_encoder_info = context.models.load(**self.clip.text_encoder.model_dump())
        text_encoder_info = context.models.load(self.clip.text_encoder)
        text_encoder_model = text_encoder_info.model
        assert isinstance(text_encoder_model, CLIPTextModel)

        def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
            for lora in self.clip.loras:
                lora_info = context.models.load(**lora.model_dump(exclude={"weight"}))
                lora_info = context.models.load(lora.lora)
                assert isinstance(lora_info.model, LoRAModelRaw)
                yield (lora_info.model, lora.weight)
                del lora_info
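The pattern repeated throughout these node hunks is that `context.models.load()` now receives the identifier object itself rather than its fields unpacked via `**model_dump()`. A small self-contained sketch of why the identifier-object style is more robust, using hypothetical stand-in types rather than InvokeAI's real services.

```python
# Hypothetical stand-ins: pass an identifier object instead of **identifier.model_dump().
from typing import Optional

from pydantic import BaseModel


class ModelIdentifier(BaseModel):
    key: str
    submodel: Optional[str] = None


class ModelLoader:
    def load(self, identifier: ModelIdentifier) -> str:
        # One validated argument; adding fields to the identifier never breaks callers.
        return f"loaded {identifier.key} ({identifier.submodel or 'whole model'})"


loader = ModelLoader()
ident = ModelIdentifier(key="sd-1/main/example-model", submodel="tokenizer")

# Old style: loader.load(**ident.model_dump()) couples callers to the loader's kwargs.
print(loader.load(ident))
```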
@ -127,16 +127,16 @@ class SDXLPromptInvocationBase:
|
||||
def run_clip_compel(
|
||||
self,
|
||||
context: InvocationContext,
|
||||
clip_field: ClipField,
|
||||
clip_field: CLIPField,
|
||||
prompt: str,
|
||||
get_pooled: bool,
|
||||
lora_prefix: str,
|
||||
zero_on_empty: bool,
|
||||
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[ExtraConditioningInfo]]:
|
||||
tokenizer_info = context.models.load(**clip_field.tokenizer.model_dump())
|
||||
tokenizer_info = context.models.load(clip_field.tokenizer)
|
||||
tokenizer_model = tokenizer_info.model
|
||||
assert isinstance(tokenizer_model, CLIPTokenizer)
|
||||
text_encoder_info = context.models.load(**clip_field.text_encoder.model_dump())
|
||||
text_encoder_info = context.models.load(clip_field.text_encoder)
|
||||
text_encoder_model = text_encoder_info.model
|
||||
assert isinstance(text_encoder_model, (CLIPTextModel, CLIPTextModelWithProjection))
|
||||
|
||||
@ -163,7 +163,7 @@ class SDXLPromptInvocationBase:
|
||||
|
||||
def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
|
||||
for lora in clip_field.loras:
|
||||
lora_info = context.models.load(**lora.model_dump(exclude={"weight"}))
|
||||
lora_info = context.models.load(lora.lora)
|
||||
lora_model = lora_info.model
|
||||
assert isinstance(lora_model, LoRAModelRaw)
|
||||
yield (lora_model, lora.weight)
|
||||
@ -232,7 +232,7 @@ class SDXLPromptInvocationBase:
|
||||
title="SDXL Prompt",
|
||||
tags=["sdxl", "compel", "prompt"],
|
||||
category="conditioning",
|
||||
version="1.0.1",
|
||||
version="1.1.1",
|
||||
)
|
||||
class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
"""Parse prompt using compel package to conditioning."""
|
||||
@ -253,8 +253,8 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
crop_left: int = InputField(default=0, description="")
|
||||
target_width: int = InputField(default=1024, description="")
|
||||
target_height: int = InputField(default=1024, description="")
|
||||
clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1")
|
||||
clip2: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2")
|
||||
clip: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 1")
|
||||
clip2: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP 2")
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> ConditioningOutput:
|
||||
@ -325,7 +325,7 @@ class SDXLCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
title="SDXL Refiner Prompt",
|
||||
tags=["sdxl", "compel", "prompt"],
|
||||
category="conditioning",
|
||||
version="1.0.1",
|
||||
version="1.1.1",
|
||||
)
|
||||
class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase):
|
||||
"""Parse prompt using compel package to conditioning."""
|
||||
@ -340,7 +340,7 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase
|
||||
crop_top: int = InputField(default=0, description="")
|
||||
crop_left: int = InputField(default=0, description="")
|
||||
aesthetic_score: float = InputField(default=6.0, description=FieldDescriptions.sdxl_aesthetic)
|
||||
clip2: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection)
|
||||
clip2: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection)
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> ConditioningOutput:
|
||||
@ -370,10 +370,10 @@ class SDXLRefinerCompelPromptInvocation(BaseInvocation, SDXLPromptInvocationBase
|
||||
|
||||
|
||||
@invocation_output("clip_skip_output")
|
||||
class ClipSkipInvocationOutput(BaseInvocationOutput):
|
||||
"""Clip skip node output"""
|
||||
class CLIPSkipInvocationOutput(BaseInvocationOutput):
|
||||
"""CLIP skip node output"""
|
||||
|
||||
clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")
|
||||
clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")
|
||||
|
||||
|
||||
@invocation(
|
||||
@ -381,17 +381,17 @@ class ClipSkipInvocationOutput(BaseInvocationOutput):
|
||||
title="CLIP Skip",
|
||||
tags=["clipskip", "clip", "skip"],
|
||||
category="conditioning",
|
||||
version="1.0.0",
|
||||
version="1.1.0",
|
||||
)
|
||||
class ClipSkipInvocation(BaseInvocation):
|
||||
class CLIPSkipInvocation(BaseInvocation):
|
||||
"""Skip layers in clip text_encoder model."""
|
||||
|
||||
clip: ClipField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP")
|
||||
clip: CLIPField = InputField(description=FieldDescriptions.clip, input=Input.Connection, title="CLIP")
|
||||
skipped_layers: int = InputField(default=0, ge=0, description=FieldDescriptions.skipped_layers)
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ClipSkipInvocationOutput:
|
||||
def invoke(self, context: InvocationContext) -> CLIPSkipInvocationOutput:
|
||||
self.clip.skipped_layers += self.skipped_layers
|
||||
return ClipSkipInvocationOutput(
|
||||
return CLIPSkipInvocationOutput(
|
||||
clip=self.clip,
|
||||
)
|
||||
|
||||
|
@ -7,12 +7,8 @@ from typing import Dict, List, Literal, Union
|
||||
import cv2
|
||||
import numpy as np
|
||||
from controlnet_aux import (
|
||||
CannyDetector,
|
||||
ContentShuffleDetector,
|
||||
HEDdetector,
|
||||
LeresDetector,
|
||||
LineartAnimeDetector,
|
||||
LineartDetector,
|
||||
MediapipeFaceDetector,
|
||||
MidasDetector,
|
||||
MLSDdetector,
|
||||
@ -31,14 +27,20 @@ from invokeai.app.invocations.fields import (
|
||||
Input,
|
||||
InputField,
|
||||
OutputField,
|
||||
UIType,
|
||||
WithBoard,
|
||||
WithMetadata,
|
||||
)
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.backend.image_util.canny import get_canny_edges
|
||||
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
|
||||
from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
|
||||
from invokeai.backend.image_util.hed import HEDProcessor
|
||||
from invokeai.backend.image_util.lineart import LineartProcessor
|
||||
from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
|
||||
|
||||
from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
|
||||
@ -51,15 +53,9 @@ CONTROLNET_RESIZE_VALUES = Literal[
|
||||
]
|
||||
|
||||
|
||||
class ControlNetModelField(BaseModel):
|
||||
"""ControlNet model field"""
|
||||
|
||||
key: str = Field(description="Model config record key for the ControlNet model")
|
||||
|
||||
|
||||
class ControlField(BaseModel):
|
||||
image: ImageField = Field(description="The control image")
|
||||
control_model: ControlNetModelField = Field(description="The ControlNet model to use")
|
||||
control_model: ModelIdentifierField = Field(description="The ControlNet model to use")
|
||||
control_weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
|
||||
begin_step_percent: float = Field(
|
||||
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
|
||||
@ -95,7 +91,9 @@ class ControlNetInvocation(BaseInvocation):
|
||||
"""Collects ControlNet info to pass to other nodes"""
|
||||
|
||||
image: ImageField = InputField(description="The control image")
|
||||
control_model: ControlNetModelField = InputField(description=FieldDescriptions.controlnet_model, input=Input.Direct)
|
||||
control_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.controlnet_model, input=Input.Direct, ui_type=UIType.ControlNetModel
|
||||
)
|
||||
control_weight: Union[float, List[float]] = InputField(
|
||||
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
|
||||
)
|
||||
@ -173,11 +171,13 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Canny Processor",
|
||||
tags=["controlnet", "canny"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.3.2",
|
||||
)
|
||||
class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Canny edge detection for ControlNet"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
low_threshold: int = InputField(
|
||||
default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
|
||||
)
|
||||
@ -189,9 +189,14 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
# Keep alpha channel for Canny processing to detect edges of transparent areas
|
||||
return context.images.get_pil(self.image.image_name, "RGBA")
|
||||
|
||||
def run_processor(self, image):
|
||||
canny_processor = CannyDetector()
|
||||
processed_image = canny_processor(image, self.low_threshold, self.high_threshold)
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
processed_image = get_canny_edges(
|
||||
image,
|
||||
self.low_threshold,
|
||||
self.high_threshold,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@ -200,7 +205,7 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="HED (softedge) Processor",
|
||||
tags=["controlnet", "hed", "softedge"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies HED edge detection to image"""
|
||||
@ -211,9 +216,9 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
# safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
||||
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
||||
|
||||
def run_processor(self, image):
|
||||
hed_processor = HEDdetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = hed_processor(
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
hed_processor = HEDProcessor()
|
||||
processed_image = hed_processor.run(
|
||||
image,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
@ -229,7 +234,7 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Lineart Processor",
|
||||
tags=["controlnet", "lineart"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art processing to image"""
|
||||
@ -238,9 +243,9 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
coarse: bool = InputField(default=False, description="Whether to use coarse mode")
|
||||
|
||||
def run_processor(self, image):
|
||||
lineart_processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = lineart_processor(
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
lineart_processor = LineartProcessor()
|
||||
processed_image = lineart_processor.run(
|
||||
image, detect_resolution=self.detect_resolution, image_resolution=self.image_resolution, coarse=self.coarse
|
||||
)
|
||||
return processed_image
|
||||
@ -251,7 +256,7 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Lineart Anime Processor",
|
||||
tags=["controlnet", "lineart", "anime"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art anime processing to image"""
|
||||
@ -259,9 +264,9 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
processor = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
|
||||
processed_image = processor(
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
processor = LineartAnimeProcessor()
|
||||
processed_image = processor.run(
|
||||
image,
|
||||
detect_resolution=self.detect_resolution,
|
||||
image_resolution=self.image_resolution,
|
||||
@ -274,13 +279,15 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Midas Depth Processor",
|
||||
tags=["controlnet", "midas"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.3",
|
||||
)
|
||||
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Midas depth processing to image"""
|
||||
|
||||
a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
|
||||
bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
# depth_and_normal not supported in controlnet_aux v0.0.3
|
||||
# depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
|
||||
|
||||
@ -290,6 +297,8 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
image,
|
||||
a=np.pi * self.a_mult,
|
||||
bg_th=self.bg_th,
|
||||
image_resolution=self.image_resolution,
|
||||
detect_resolution=self.detect_resolution,
|
||||
# dept_and_normal not supported in controlnet_aux v0.0.3
|
||||
# depth_and_normal=self.depth_and_normal,
|
||||
)
|
||||
@ -301,7 +310,7 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Normal BAE Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies NormalBae processing to image"""
|
||||
@ -318,7 +327,7 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.1"
|
||||
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.2"
|
||||
)
|
||||
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies MLSD processing to image"""
|
||||
@ -341,7 +350,7 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.1"
|
||||
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.2"
|
||||
)
|
||||
class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies PIDI processing to image"""
|
||||
@ -368,7 +377,7 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Content Shuffle Processor",
|
||||
tags=["controlnet", "contentshuffle"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies content shuffle processing to image"""
|
||||
@ -398,7 +407,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Zoe (Depth) Processor",
|
||||
tags=["controlnet", "zoe", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Zoe depth processing to image"""
|
||||
@ -414,17 +423,25 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Mediapipe Face Processor",
|
||||
tags=["controlnet", "mediapipe", "face"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.3",
|
||||
)
|
||||
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies mediapipe face processing to image"""
|
||||
|
||||
max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
|
||||
min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
mediapipe_face_processor = MediapipeFaceDetector()
|
||||
processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence)
|
||||
processed_image = mediapipe_face_processor(
|
||||
image,
|
||||
max_faces=self.max_faces,
|
||||
min_confidence=self.min_confidence,
|
||||
image_resolution=self.image_resolution,
|
||||
detect_resolution=self.detect_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@ -433,7 +450,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Leres (Depth) Processor",
|
||||
tags=["controlnet", "leres", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies leres processing to image"""
|
||||
@ -462,7 +479,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Tile Resample Processor",
|
||||
tags=["controlnet", "tile"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Tile resampler processor"""
|
||||
@ -502,18 +519,23 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Segment Anything Processor",
|
||||
tags=["controlnet", "segmentanything"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.3",
|
||||
)
|
||||
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies segment anything processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
# segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
|
||||
segment_anything_processor = SamDetectorReproducibleColors.from_pretrained(
|
||||
"ybelkada/segment-anything", subfolder="checkpoints"
|
||||
)
|
||||
np_img = np.array(image, dtype=np.uint8)
|
||||
processed_image = segment_anything_processor(np_img)
|
||||
processed_image = segment_anything_processor(
|
||||
np_img, image_resolution=self.image_resolution, detect_resolution=self.detect_resolution
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@ -544,7 +566,7 @@ class SamDetectorReproducibleColors(SamDetector):
|
||||
title="Color Map Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a color map from the provided image"""
|
||||
@ -576,7 +598,7 @@ DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
|
||||
title="Depth Anything Processor",
|
||||
tags=["controlnet", "depth", "depth anything"],
|
||||
category="controlnet",
|
||||
version="1.0.0",
|
||||
version="1.1.1",
|
||||
)
|
||||
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a depth map based on the Depth Anything algorithm"""
|
||||
@ -585,13 +607,12 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
default="small", description="The size of the depth model to use"
|
||||
)
|
||||
resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
|
||||
offload: bool = InputField(default=False)
|
||||
|
||||
def run_processor(self, image: Image.Image):
|
||||
depth_anything_detector = DepthAnythingDetector()
|
||||
depth_anything_detector.load_model(model_size=self.model_size)
|
||||
|
||||
processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload)
|
||||
processed_image = depth_anything_detector(image=image, resolution=self.resolution)
|
||||
return processed_image
|
||||
|
||||
|
||||
@ -600,7 +621,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="DW Openpose Image Processor",
|
||||
tags=["controlnet", "dwpose", "openpose"],
|
||||
category="controlnet",
|
||||
version="1.0.0",
|
||||
version="1.1.0",
|
||||
)
|
||||
class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates an openpose pose from an image using DWPose"""
|
||||
|
@ -13,7 +13,7 @@ from .baseinvocation import BaseInvocation, invocation
|
||||
from .fields import InputField, WithBoard, WithMetadata
|
||||
|
||||
|
||||
@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.2.1")
|
||||
@invocation("cv_inpaint", title="OpenCV Inpaint", tags=["opencv", "inpaint"], category="inpaint", version="1.3.1")
|
||||
class CvInpaintInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Simple inpaint using opencv."""
|
||||
|
||||
|
@ -435,7 +435,7 @@ def get_faces_list(
|
||||
return all_faces
|
||||
|
||||
|
||||
@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.2.1")
|
||||
@invocation("face_off", title="FaceOff", tags=["image", "faceoff", "face", "mask"], category="image", version="1.2.2")
|
||||
class FaceOffInvocation(BaseInvocation, WithMetadata):
|
||||
"""Bound, extract, and mask a face from an image using MediaPipe detection"""
|
||||
|
||||
@ -514,7 +514,7 @@ class FaceOffInvocation(BaseInvocation, WithMetadata):
|
||||
return output
|
||||
|
||||
|
||||
@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.2.1")
|
||||
@invocation("face_mask_detection", title="FaceMask", tags=["image", "face", "mask"], category="image", version="1.2.2")
|
||||
class FaceMaskInvocation(BaseInvocation, WithMetadata):
|
||||
"""Face mask creation using mediapipe face detection"""
|
||||
|
||||
@ -617,7 +617,7 @@ class FaceMaskInvocation(BaseInvocation, WithMetadata):
|
||||
|
||||
|
||||
@invocation(
|
||||
"face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.2.1"
|
||||
"face_identifier", title="FaceIdentifier", tags=["image", "face", "identifier"], category="image", version="1.2.2"
|
||||
)
|
||||
class FaceIdentifierInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Outputs an image with detected face IDs printed on each face. For use with other FaceTools."""
|
||||
|
@@ -39,13 +39,15 @@ class UIType(str, Enum, metaclass=MetaEnum):
    """

    # region Model Field Types
    MainModel = "MainModelField"
    SDXLMainModel = "SDXLMainModelField"
    SDXLRefinerModel = "SDXLRefinerModelField"
    ONNXModel = "ONNXModelField"
    VaeModel = "VAEModelField"
    VAEModel = "VAEModelField"
    LoRAModel = "LoRAModelField"
    ControlNetModel = "ControlNetModelField"
    IPAdapterModel = "IPAdapterModelField"
    T2IAdapterModel = "T2IAdapterModelField"
    # endregion

    # region Misc Field Types
@@ -86,7 +88,6 @@ class UIType(str, Enum, metaclass=MetaEnum):
    IntegerPolymorphic = "DEPRECATED_IntegerPolymorphic"
    LatentsPolymorphic = "DEPRECATED_LatentsPolymorphic"
    StringPolymorphic = "DEPRECATED_StringPolymorphic"
    MainModel = "DEPRECATED_MainModel"
    UNet = "DEPRECATED_UNet"
    Vae = "DEPRECATED_Vae"
    CLIP = "DEPRECATED_CLIP"
@@ -228,7 +229,7 @@ class ConditioningField(BaseModel):
    # endregion


class MetadataField(RootModel):
class MetadataField(RootModel[dict[str, Any]]):
    """
    Pydantic model for metadata with custom root of type dict[str, Any].
    Metadata is stored without a strict schema.
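The `MetadataField` change above parametrizes pydantic's `RootModel`, so both validation and the generated JSON schema know the root is a `dict[str, Any]`. A small self-contained sketch of the effect, assuming pydantic v2.

```python
# Sketch: a parametrized RootModel versus an untyped one.
from typing import Any

from pydantic import RootModel


class MetadataField(RootModel[dict[str, Any]]):
    """Free-form metadata stored without a strict schema."""


meta = MetadataField({"seed": 123, "prompt": "a cat"})
print(meta.root["seed"])                  # 123
print(MetadataField.model_json_schema())  # advertises an object root instead of an untyped one
```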
@ -49,7 +49,7 @@ class ShowImageInvocation(BaseInvocation):
|
||||
title="Blank Image",
|
||||
tags=["image"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class BlankImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Creates a blank image and forwards it to the pipeline"""
|
||||
@ -72,7 +72,7 @@ class BlankImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Crop Image",
|
||||
tags=["image", "crop"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageCropInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Crops an image to a specified box. The box can be outside of the image."""
|
||||
@ -143,7 +143,7 @@ class CenterPadCropInvocation(BaseInvocation):
|
||||
title="Paste Image",
|
||||
tags=["image", "paste"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImagePasteInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Pastes an image into another image."""
|
||||
@ -190,7 +190,7 @@ class ImagePasteInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Mask from Alpha",
|
||||
tags=["image", "mask"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class MaskFromAlphaInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Extracts the alpha channel of an image as a mask."""
|
||||
@ -215,7 +215,7 @@ class MaskFromAlphaInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Multiply Images",
|
||||
tags=["image", "multiply"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Multiplies two images together using `PIL.ImageChops.multiply()`."""
|
||||
@ -242,7 +242,7 @@ IMAGE_CHANNELS = Literal["A", "R", "G", "B"]
|
||||
title="Extract Image Channel",
|
||||
tags=["image", "channel"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageChannelInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Gets a channel from an image."""
|
||||
@ -265,7 +265,7 @@ class ImageChannelInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Convert Image Mode",
|
||||
tags=["image", "convert"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageConvertInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Converts an image to a different mode."""
|
||||
@ -288,7 +288,7 @@ class ImageConvertInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Blur Image",
|
||||
tags=["image", "blur"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Blurs an image"""
|
||||
@ -316,7 +316,7 @@ class ImageBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Unsharp Mask",
|
||||
tags=["image", "unsharp_mask"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class UnsharpMaskInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
@ -385,7 +385,7 @@ PIL_RESAMPLING_MAP = {
|
||||
title="Resize Image",
|
||||
tags=["image", "resize"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageResizeInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Resizes an image to specific dimensions"""
|
||||
@ -415,7 +415,7 @@ class ImageResizeInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Scale Image",
|
||||
tags=["image", "scale"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageScaleInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Scales an image by a factor"""
|
||||
@ -450,7 +450,7 @@ class ImageScaleInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Lerp Image",
|
||||
tags=["image", "lerp"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Linear interpolation of all pixels of an image"""
|
||||
@ -477,7 +477,7 @@ class ImageLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Inverse Lerp Image",
|
||||
tags=["image", "ilerp"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageInverseLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Inverse linear interpolation of all pixels of an image"""
|
||||
@ -504,7 +504,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Blur NSFW Image",
|
||||
tags=["image", "nsfw"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add blur to NSFW-flagged images"""
|
||||
@ -539,7 +539,7 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Add Invisible Watermark",
|
||||
tags=["image", "watermark"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add an invisible watermark to an image"""
|
||||
@ -560,7 +560,7 @@ class ImageWatermarkInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Mask Edge",
|
||||
tags=["image", "mask", "inpaint"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class MaskEdgeInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Applies an edge mask to an image"""
|
||||
@ -599,7 +599,7 @@ class MaskEdgeInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Combine Masks",
|
||||
tags=["image", "mask", "multiply"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class MaskCombineInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Combine two masks together by multiplying them using `PIL.ImageChops.multiply()`."""
|
||||
@ -623,7 +623,7 @@ class MaskCombineInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Color Correct",
|
||||
tags=["image", "color"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ColorCorrectInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""
|
||||
@ -727,7 +727,7 @@ class ColorCorrectInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Adjust Image Hue",
|
||||
tags=["image", "hue"],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageHueAdjustmentInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Adjusts the Hue of an image."""
|
||||
@ -816,7 +816,7 @@ CHANNEL_FORMATS = {
|
||||
"value",
|
||||
],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Add or subtract a value from a specific color channel of an image."""
|
||||
@ -872,7 +872,7 @@ class ImageChannelOffsetInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"value",
|
||||
],
|
||||
category="image",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
)
|
||||
class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Scale a specific color channel of an image."""
|
||||
@ -916,7 +916,7 @@ class ImageChannelMultiplyInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Save Image",
|
||||
tags=["primitives", "image"],
|
||||
category="primitives",
|
||||
version="1.2.1",
|
||||
version="1.2.2",
|
||||
use_cache=False,
|
||||
)
|
||||
class SaveImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
|
@@ -9,6 +9,7 @@ from PIL import Image, ImageOps
from invokeai.app.invocations.fields import ColorField, ImageField
from invokeai.app.invocations.primitives import ImageOutput
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.app.util.misc import SEED_MAX
from invokeai.backend.image_util.cv2_inpaint import cv2_inpaint
from invokeai.backend.image_util.lama import LaMA
@@ -120,7 +121,7 @@ def tile_fill_missing(im: Image.Image, tile_size: int = 16, seed: Optional[int]
return si

@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1")
@invocation("infill_rgba", title="Solid Color Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.2")
class InfillColorInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Infills transparent areas of an image with a solid color"""

@@ -143,7 +144,7 @@ class InfillColorInvocation(BaseInvocation, WithMetadata, WithBoard):
return ImageOutput.build(image_dto)

@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.2")
@invocation("infill_tile", title="Tile Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.3")
class InfillTileInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Infills transparent areas of an image with tiles of the image"""

@@ -168,7 +169,7 @@ class InfillTileInvocation(BaseInvocation, WithMetadata, WithBoard):

@invocation(
"infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1"
"infill_patchmatch", title="PatchMatch Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.2"
)
class InfillPatchMatchInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Infills transparent areas of an image using the PatchMatch algorithm"""
@@ -208,7 +209,7 @@ class InfillPatchMatchInvocation(BaseInvocation, WithMetadata, WithBoard):
return ImageOutput.build(image_dto)

@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1")
@invocation("infill_lama", title="LaMa Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.2")
class LaMaInfillInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Infills transparent areas of an image using the LaMa model"""

@@ -217,6 +218,13 @@ class LaMaInfillInvocation(BaseInvocation, WithMetadata, WithBoard):
def invoke(self, context: InvocationContext) -> ImageOutput:
image = context.images.get_pil(self.image.image_name)

# Downloads the LaMa model if it doesn't already exist
download_with_progress_bar(
name="LaMa Inpainting Model",
url="https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
dest_path=context.config.get().models_path / "core/misc/lama/lama.pt",
)

infilled = infill_lama(image.copy())

image_dto = context.images.save(image=infilled)
@@ -224,7 +232,7 @@ class LaMaInfillInvocation(BaseInvocation, WithMetadata, WithBoard):
return ImageOutput.build(image_dto)

@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.1")
@invocation("infill_cv2", title="CV2 Infill", tags=["image", "inpaint"], category="inpaint", version="1.2.2")
class CV2InfillInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Infills transparent areas of an image using OpenCV Inpainting"""

@@ -10,26 +10,18 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager.config import BaseModelType, ModelType

# LS: Consider moving these two classes into model.py
class IPAdapterModelField(BaseModel):
key: str = Field(description="Key to the IP-Adapter model")

class CLIPVisionModelField(BaseModel):
key: str = Field(description="Key to the CLIP Vision image encoder model")
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, IPAdapterConfig, ModelType

class IPAdapterField(BaseModel):
image: Union[ImageField, List[ImageField]] = Field(description="The IP-Adapter image prompt(s).")
ip_adapter_model: IPAdapterModelField = Field(description="The IP-Adapter model to use.")
image_encoder_model: CLIPVisionModelField = Field(description="The name of the CLIP image encoder model.")
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model to use.")
image_encoder_model: ModelIdentifierField = Field(description="The name of the CLIP image encoder model.")
weight: Union[float, List[float]] = Field(default=1, description="The weight given to the ControlNet")
begin_step_percent: float = Field(
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
@@ -56,14 +48,18 @@ class IPAdapterOutput(BaseInvocationOutput):
ip_adapter: IPAdapterField = OutputField(description=FieldDescriptions.ip_adapter, title="IP-Adapter")

@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.1.2")
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.2.2")
class IPAdapterInvocation(BaseInvocation):
"""Collects IP-Adapter info to pass to other nodes."""

# Inputs
image: Union[ImageField, List[ImageField]] = InputField(description="The IP-Adapter image prompt(s).")
ip_adapter_model: IPAdapterModelField = InputField(
description="The IP-Adapter model.", title="IP-Adapter Model", input=Input.Direct, ui_order=-1
ip_adapter_model: ModelIdentifierField = InputField(
description="The IP-Adapter model.",
title="IP-Adapter Model",
input=Input.Direct,
ui_order=-1,
ui_type=UIType.IPAdapterModel,
)

weight: Union[float, List[float]] = InputField(
@@ -90,20 +86,35 @@ class IPAdapterInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> IPAdapterOutput:
# Lookup the CLIP Vision encoder that is intended to be used with the IP-Adapter model.
ip_adapter_info = context.models.get_config(self.ip_adapter_model.key)
assert isinstance(ip_adapter_info, IPAdapterConfig)
image_encoder_model_id = ip_adapter_info.image_encoder_model_id
image_encoder_model_name = image_encoder_model_id.split("/")[-1].strip()
image_encoder_models = context.models.search_by_attrs(
name=image_encoder_model_name, base=BaseModelType.Any, type=ModelType.CLIPVision
)
assert len(image_encoder_models) == 1
image_encoder_model = CLIPVisionModelField(key=image_encoder_models[0].key)
image_encoder_model = self._get_image_encoder(context, image_encoder_model_name)
return IPAdapterOutput(
ip_adapter=IPAdapterField(
image=self.image,
ip_adapter_model=self.ip_adapter_model,
image_encoder_model=image_encoder_model,
image_encoder_model=ModelIdentifierField.from_config(image_encoder_model),
weight=self.weight,
begin_step_percent=self.begin_step_percent,
end_step_percent=self.end_step_percent,
),
)

def _get_image_encoder(self, context: InvocationContext, image_encoder_model_name: str) -> AnyModelConfig:
found = False
while not found:
image_encoder_models = context.models.search_by_attrs(
name=image_encoder_model_name, base=BaseModelType.Any, type=ModelType.CLIPVision
)
found = len(image_encoder_models) > 0
if not found:
context.logger.warning(
f"The image encoder required by this IP Adapter ({image_encoder_model_name}) is not installed."
)
context.logger.warning("Downloading and installing now. This may take a while.")
installer = context._services.model_manager.install
job = installer.heuristic_import(f"InvokeAI/{image_encoder_model_name}")
installer.wait_for_job(job, timeout=600) # wait up to 10 minutes - then raise a TimeoutException
assert len(image_encoder_models) == 1
return image_encoder_models[0]

@@ -26,6 +26,7 @@ from diffusers.schedulers import SchedulerMixin as Scheduler
from PIL import Image, ImageFilter
from pydantic import field_validator
from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPVisionModelWithProjection

from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR, SCHEDULER_NAME_VALUES
from invokeai.app.invocations.fields import (
@@ -65,7 +66,6 @@ from ...backend.stable_diffusion.diffusers_pipeline import (
T2IAdapterData,
image_resized_to_grid_as_tensor,
)
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
from ...backend.util.devices import choose_precision, choose_torch_device
from .baseinvocation import (
@@ -75,7 +75,7 @@ from .baseinvocation import (
invocation_output,
)
from .controlnet_image_processors import ControlField
from .model import ModelInfo, UNetField, VaeField
from .model import ModelIdentifierField, UNetField, VAEField

if choose_torch_device() == torch.device("mps"):
from torch import mps
@@ -113,12 +113,12 @@ class SchedulerInvocation(BaseInvocation):
title="Create Denoise Mask",
tags=["mask", "denoise"],
category="latents",
version="1.0.1",
version="1.0.2",
)
class CreateDenoiseMaskInvocation(BaseInvocation):
"""Creates mask for denoising model run."""

vae: VaeField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0)
vae: VAEField = InputField(description=FieldDescriptions.vae, input=Input.Connection, ui_order=0)
image: Optional[ImageField] = InputField(default=None, description="Image which will be masked", ui_order=1)
mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
@@ -153,7 +153,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
)

if image_tensor is not None:
vae_info = context.models.load(**self.vae.vae.model_dump())
vae_info = context.models.load(self.vae.vae)

img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
@@ -244,12 +244,12 @@ class CreateGradientMaskInvocation(BaseInvocation):

def get_scheduler(
context: InvocationContext,
scheduler_info: ModelInfo,
scheduler_info: ModelIdentifierField,
scheduler_name: str,
seed: int,
) -> Scheduler:
scheduler_class, scheduler_extra_config = SCHEDULER_MAP.get(scheduler_name, SCHEDULER_MAP["ddim"])
orig_scheduler_info = context.models.load(**scheduler_info.model_dump())
orig_scheduler_info = context.models.load(scheduler_info)
with orig_scheduler_info as orig_scheduler:
scheduler_config = orig_scheduler.config

@@ -279,7 +279,7 @@ def get_scheduler(
title="Denoise Latents",
tags=["latents", "denoise", "txt2img", "t2i", "t2l", "img2img", "i2i", "l2l"],
category="latents",
version="1.5.2",
version="1.5.3",
)
class DenoiseLatentsInvocation(BaseInvocation):
"""Denoises noisy latents to decodable images"""
@@ -383,12 +383,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
text_embeddings=c,
guidance_scale=self.cfg_scale,
guidance_rescale_multiplier=self.cfg_rescale_multiplier,
postprocessing_settings=PostprocessingSettings(
threshold=0.0, # threshold,
warmup=0.2, # warmup,
h_symmetry_time_pct=None, # h_symmetry_time_pct,
v_symmetry_time_pct=None, # v_symmetry_time_pct,
),
)

conditioning_data = conditioning_data.add_scheduler_args_if_applicable( # FIXME
@@ -461,7 +455,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
# and if weight is None, populate with default 1.0?
controlnet_data = []
for control_info in control_list:
control_model = exit_stack.enter_context(context.models.load(key=control_info.control_model.key))
control_model = exit_stack.enter_context(context.models.load(control_info.control_model))

# control_models.append(control_model)
control_image_field = control_info.image
@@ -523,11 +517,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
conditioning_data.ip_adapter_conditioning = []
for single_ip_adapter in ip_adapter:
ip_adapter_model: Union[IPAdapter, IPAdapterPlus] = exit_stack.enter_context(
context.models.load(key=single_ip_adapter.ip_adapter_model.key)
context.models.load(single_ip_adapter.ip_adapter_model)
)

image_encoder_model_info = context.models.load(key=single_ip_adapter.image_encoder_model.key)

image_encoder_model_info = context.models.load(single_ip_adapter.image_encoder_model)
# `single_ip_adapter.image` could be a list or a single ImageField. Normalize to a list here.
single_ipa_image_fields = single_ip_adapter.image
if not isinstance(single_ipa_image_fields, list):
@@ -538,6 +531,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
# TODO(ryand): With some effort, the step of running the CLIP Vision encoder could be done before any other
# models are needed in memory. This would help to reduce peak memory utilization in low-memory environments.
with image_encoder_model_info as image_encoder_model:
assert isinstance(image_encoder_model, CLIPVisionModelWithProjection)
# Get image embeddings from CLIP and ImageProjModel.
image_prompt_embeds, uncond_image_prompt_embeds = ip_adapter_model.get_image_embeds(
single_ipa_images, image_encoder_model
@@ -577,8 +571,8 @@ class DenoiseLatentsInvocation(BaseInvocation):

t2i_adapter_data = []
for t2i_adapter_field in t2i_adapter:
t2i_adapter_model_config = context.models.get_config(key=t2i_adapter_field.t2i_adapter_model.key)
t2i_adapter_loaded_model = context.models.load(key=t2i_adapter_field.t2i_adapter_model.key)
t2i_adapter_model_config = context.models.get_config(t2i_adapter_field.t2i_adapter_model.key)
t2i_adapter_loaded_model = context.models.load(t2i_adapter_field.t2i_adapter_model)
image = context.images.get_pil(t2i_adapter_field.image.image_name)

# The max_unet_downscale is the maximum amount that the UNet model downscales the latent image internally.
@@ -683,7 +677,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
if self.denoise_mask.masked_latents_name is not None:
masked_latents = context.tensors.load(self.denoise_mask.masked_latents_name)
else:
masked_latents = None
masked_latents = torch.where(mask < 0.5, 0.0, latents)

return 1 - mask, masked_latents, self.denoise_mask.gradient

@@ -731,12 +725,13 @@ class DenoiseLatentsInvocation(BaseInvocation):

def _lora_loader() -> Iterator[Tuple[LoRAModelRaw, float]]:
for lora in self.unet.loras:
lora_info = context.models.load(**lora.model_dump(exclude={"weight"}))
lora_info = context.models.load(lora.lora)
assert isinstance(lora_info.model, LoRAModelRaw)
yield (lora_info.model, lora.weight)
del lora_info
return

unet_info = context.models.load(**self.unet.unet.model_dump())
unet_info = context.models.load(self.unet.unet)
assert isinstance(unet_info.model, UNet2DConditionModel)
with (
ExitStack() as exit_stack,
@@ -821,7 +816,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
title="Latents to Image",
tags=["latents", "image", "vae", "l2i"],
category="latents",
version="1.2.1",
version="1.2.2",
)
class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
"""Generates an image from latents."""
@@ -830,7 +825,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
description=FieldDescriptions.latents,
input=Input.Connection,
)
vae: VaeField = InputField(
vae: VAEField = InputField(
description=FieldDescriptions.vae,
input=Input.Connection,
)
@@ -841,15 +836,15 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
def invoke(self, context: InvocationContext) -> ImageOutput:
latents = context.tensors.load(self.latents.latents_name)

vae_info = context.models.load(**self.vae.vae.model_dump())

vae_info = context.models.load(self.vae.vae)
assert isinstance(vae_info.model, (UNet2DConditionModel, AutoencoderKL, AutoencoderTiny))
with set_seamless(vae_info.model, self.vae.seamless_axes), vae_info as vae:
assert isinstance(vae, torch.nn.Module)
latents = latents.to(vae.device)
if self.fp32:
vae.to(dtype=torch.float32)

use_torch_2_0_or_xformers = isinstance(
use_torch_2_0_or_xformers = hasattr(vae.decoder, "mid_block") and isinstance(
vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
@@ -871,7 +866,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
vae.to(dtype=torch.float16)
latents = latents.half()

if self.tiled or context.config.get().tiled_decode:
if self.tiled or context.config.get().force_tiled_decode:
vae.enable_tiling()
else:
vae.disable_tiling()
@@ -908,7 +903,7 @@ LATENTS_INTERPOLATION_MODE = Literal["nearest", "linear", "bilinear", "bicubic",
title="Resize Latents",
tags=["latents", "resize"],
category="latents",
version="1.0.1",
version="1.0.2",
)
class ResizeLatentsInvocation(BaseInvocation):
"""Resizes latents to explicit width/height (in pixels). Provided dimensions are floor-divided by 8."""
@@ -958,7 +953,7 @@ class ResizeLatentsInvocation(BaseInvocation):
title="Scale Latents",
tags=["latents", "resize"],
category="latents",
version="1.0.1",
version="1.0.2",
)
class ScaleLatentsInvocation(BaseInvocation):
"""Scales latents by a given factor."""
@@ -1000,7 +995,7 @@ class ScaleLatentsInvocation(BaseInvocation):
title="Image to Latents",
tags=["latents", "image", "vae", "i2l"],
category="latents",
version="1.0.1",
version="1.0.2",
)
class ImageToLatentsInvocation(BaseInvocation):
"""Encodes an image into latents."""
@@ -1008,7 +1003,7 @@ class ImageToLatentsInvocation(BaseInvocation):
image: ImageField = InputField(
description="The image to encode",
)
vae: VaeField = InputField(
vae: VAEField = InputField(
description=FieldDescriptions.vae,
input=Input.Connection,
)
@@ -1023,7 +1018,7 @@ class ImageToLatentsInvocation(BaseInvocation):
if upcast:
vae.to(dtype=torch.float32)

use_torch_2_0_or_xformers = isinstance(
use_torch_2_0_or_xformers = hasattr(vae.decoder, "mid_block") and isinstance(
vae.decoder.mid_block.attentions[0].processor,
(
AttnProcessor2_0,
@@ -1064,7 +1059,7 @@ class ImageToLatentsInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> LatentsOutput:
image = context.images.get_pil(self.image.image_name)

vae_info = context.models.load(**self.vae.vae.model_dump())
vae_info = context.models.load(self.vae.vae)

image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
if image_tensor.dim() == 3:
@@ -1099,7 +1094,7 @@ class ImageToLatentsInvocation(BaseInvocation):
title="Blend Latents",
tags=["latents", "blend"],
category="latents",
version="1.0.1",
version="1.0.2",
)
class BlendLatentsInvocation(BaseInvocation):
"""Blend two latents using a given alpha. Latents must have same size."""
@@ -1190,7 +1185,7 @@ class BlendLatentsInvocation(BaseInvocation):
title="Crop Latents",
tags=["latents", "crop"],
category="latents",
version="1.0.1",
version="1.0.2",
)
# TODO(ryand): Named `CropLatentsCoreInvocation` to prevent a conflict with custom node `CropLatentsInvocation`.
# Currently, if the class names conflict then 'GET /openapi.json' fails.
@@ -1251,7 +1246,7 @@ class IdealSizeOutput(BaseInvocationOutput):
"ideal_size",
title="Ideal Size",
tags=["latents", "math", "ideal_size"],
version="1.0.2",
version="1.0.3",
)
class IdealSizeInvocation(BaseInvocation):
"""Calculates the ideal size for generation to avoid duplication"""

@@ -12,7 +12,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from .baseinvocation import BaseInvocation, invocation

@invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.0")
@invocation("add", title="Add Integers", tags=["math", "add"], category="math", version="1.0.1")
class AddInvocation(BaseInvocation):
"""Adds two numbers"""

@@ -23,7 +23,7 @@ class AddInvocation(BaseInvocation):
return IntegerOutput(value=self.a + self.b)

@invocation("sub", title="Subtract Integers", tags=["math", "subtract"], category="math", version="1.0.0")
@invocation("sub", title="Subtract Integers", tags=["math", "subtract"], category="math", version="1.0.1")
class SubtractInvocation(BaseInvocation):
"""Subtracts two numbers"""

@@ -34,7 +34,7 @@ class SubtractInvocation(BaseInvocation):
return IntegerOutput(value=self.a - self.b)

@invocation("mul", title="Multiply Integers", tags=["math", "multiply"], category="math", version="1.0.0")
@invocation("mul", title="Multiply Integers", tags=["math", "multiply"], category="math", version="1.0.1")
class MultiplyInvocation(BaseInvocation):
"""Multiplies two numbers"""

@@ -45,7 +45,7 @@ class MultiplyInvocation(BaseInvocation):
return IntegerOutput(value=self.a * self.b)

@invocation("div", title="Divide Integers", tags=["math", "divide"], category="math", version="1.0.0")
@invocation("div", title="Divide Integers", tags=["math", "divide"], category="math", version="1.0.1")
class DivideInvocation(BaseInvocation):
"""Divides two numbers"""

@@ -61,7 +61,7 @@ class DivideInvocation(BaseInvocation):
title="Random Integer",
tags=["math", "random"],
category="math",
version="1.0.0",
version="1.0.1",
use_cache=False,
)
class RandomIntInvocation(BaseInvocation):
@@ -100,7 +100,7 @@ class RandomFloatInvocation(BaseInvocation):
title="Float To Integer",
tags=["math", "round", "integer", "float", "convert"],
category="math",
version="1.0.0",
version="1.0.1",
)
class FloatToIntegerInvocation(BaseInvocation):
"""Rounds a float number to (a multiple of) an integer."""
@@ -122,7 +122,7 @@ class FloatToIntegerInvocation(BaseInvocation):
return IntegerOutput(value=int(self.value / self.multiple) * self.multiple)

@invocation("round_float", title="Round Float", tags=["math", "round"], category="math", version="1.0.0")
@invocation("round_float", title="Round Float", tags=["math", "round"], category="math", version="1.0.1")
class RoundInvocation(BaseInvocation):
"""Rounds a float to a specified number of decimal places."""

@@ -176,7 +176,7 @@ INTEGER_OPERATIONS_LABELS = {
"max",
],
category="math",
version="1.0.0",
version="1.0.1",
)
class IntegerMathInvocation(BaseInvocation):
"""Performs integer math."""
@@ -250,7 +250,7 @@ FLOAT_OPERATIONS_LABELS = {
title="Float Math",
tags=["math", "float", "add", "subtract", "multiply", "divide", "power", "root", "absolute value", "min", "max"],
category="math",
version="1.0.0",
version="1.0.1",
)
class FloatMathInvocation(BaseInvocation):
"""Performs floating point math."""

@@ -8,7 +8,10 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.controlnet_image_processors import ControlField
from invokeai.app.invocations.controlnet_image_processors import (
CONTROLNET_MODE_VALUES,
CONTROLNET_RESIZE_VALUES,
)
from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,
@@ -17,9 +20,7 @@ from invokeai.app.invocations.fields import (
OutputField,
UIType,
)
from invokeai.app.invocations.ip_adapter import IPAdapterModelField
from invokeai.app.invocations.model import LoRAModelField, MainModelField, VAEModelField
from invokeai.app.invocations.t2i_adapter import T2IAdapterField
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.shared.invocation_context import InvocationContext

from ...version import __version__
@@ -33,7 +34,7 @@ class MetadataItemField(BaseModel):
class LoRAMetadataField(BaseModel):
"""LoRA Metadata Field"""

model: LoRAModelField = Field(description=FieldDescriptions.lora_model)
model: ModelIdentifierField = Field(description=FieldDescriptions.lora_model)
weight: float = Field(description=FieldDescriptions.lora_weight)

@@ -41,16 +42,41 @@ class IPAdapterMetadataField(BaseModel):
"""IP Adapter Field, minus the CLIP Vision Encoder model"""

image: ImageField = Field(description="The IP-Adapter image prompt.")
ip_adapter_model: IPAdapterModelField = Field(
description="The IP-Adapter model.",
)
weight: Union[float, list[float]] = Field(
description="The weight given to the IP-Adapter",
)
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)")

class T2IAdapterMetadataField(BaseModel):
image: ImageField = Field(description="The control image.")
processed_image: Optional[ImageField] = Field(default=None, description="The control image, after processing.")
t2i_adapter_model: ModelIdentifierField = Field(description="The T2I-Adapter model to use.")
weight: Union[float, list[float]] = Field(default=1, description="The weight given to the T2I-Adapter")
begin_step_percent: float = Field(
default=0, ge=0, le=1, description="When the T2I-Adapter is first applied (% of total steps)"
)
end_step_percent: float = Field(
default=1, ge=0, le=1, description="When the T2I-Adapter is last applied (% of total steps)"
)
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")

class ControlNetMetadataField(BaseModel):
image: ImageField = Field(description="The control image")
processed_image: Optional[ImageField] = Field(default=None, description="The control image, after processing.")
control_model: ModelIdentifierField = Field(description="The ControlNet model to use")
control_weight: Union[float, list[float]] = Field(default=1, description="The weight given to the ControlNet")
begin_step_percent: float = Field(
default=0, ge=0, le=1, description="When the ControlNet is first applied (% of total steps)"
)
end_step_percent: float = Field(
default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)"
)
control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use")
resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use")

@invocation_output("metadata_item_output")
class MetadataItemOutput(BaseInvocationOutput):
"""Metadata Item Output"""
@@ -58,7 +84,7 @@ class MetadataItemOutput(BaseInvocationOutput):
item: MetadataItemField = OutputField(description="Metadata Item")

@invocation("metadata_item", title="Metadata Item", tags=["metadata"], category="metadata", version="1.0.0")
@invocation("metadata_item", title="Metadata Item", tags=["metadata"], category="metadata", version="1.0.1")
class MetadataItemInvocation(BaseInvocation):
"""Used to create an arbitrary metadata item. Provide "label" and make a connection to "value" to store that data as the value."""

@@ -74,7 +100,7 @@ class MetadataOutput(BaseInvocationOutput):
metadata: MetadataField = OutputField(description="Metadata Dict")

@invocation("metadata", title="Metadata", tags=["metadata"], category="metadata", version="1.0.0")
@invocation("metadata", title="Metadata", tags=["metadata"], category="metadata", version="1.0.1")
class MetadataInvocation(BaseInvocation):
"""Takes a MetadataItem or collection of MetadataItems and outputs a MetadataDict."""

@@ -95,7 +121,7 @@ class MetadataInvocation(BaseInvocation):
return MetadataOutput(metadata=MetadataField.model_validate(data))

@invocation("merge_metadata", title="Metadata Merge", tags=["metadata"], category="metadata", version="1.0.0")
@invocation("merge_metadata", title="Metadata Merge", tags=["metadata"], category="metadata", version="1.0.1")
class MergeMetadataInvocation(BaseInvocation):
"""Merged a collection of MetadataDict into a single MetadataDict."""

@@ -114,7 +140,7 @@ GENERATION_MODES = Literal[
]

@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.1.1")
@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="2.0.0")
class CoreMetadataInvocation(BaseInvocation):
"""Collects core generation metadata into a MetadataField"""

@@ -140,14 +166,14 @@ class CoreMetadataInvocation(BaseInvocation):
default=None,
description="The number of skipped CLIP layers",
)
model: Optional[MainModelField] = InputField(default=None, description="The main model used for inference")
controlnets: Optional[list[ControlField]] = InputField(
model: Optional[ModelIdentifierField] = InputField(default=None, description="The main model used for inference")
controlnets: Optional[list[ControlNetMetadataField]] = InputField(
default=None, description="The ControlNets used for inference"
)
ipAdapters: Optional[list[IPAdapterMetadataField]] = InputField(
default=None, description="The IP Adapters used for inference"
)
t2iAdapters: Optional[list[T2IAdapterField]] = InputField(
t2iAdapters: Optional[list[T2IAdapterMetadataField]] = InputField(
default=None, description="The IP Adapters used for inference"
)
loras: Optional[list[LoRAMetadataField]] = InputField(default=None, description="The LoRAs used for inference")
@@ -159,7 +185,7 @@ class CoreMetadataInvocation(BaseInvocation):
default=None,
description="The name of the initial image",
)
vae: Optional[VAEModelField] = InputField(
vae: Optional[ModelIdentifierField] = InputField(
default=None,
description="The VAE used for decoding, if the main model's default was not used",
)
@@ -190,7 +216,7 @@ class CoreMetadataInvocation(BaseInvocation):
)

# SDXL Refiner
refiner_model: Optional[MainModelField] = InputField(
refiner_model: Optional[ModelIdentifierField] = InputField(
default=None,
description="The SDXL Refiner model used",
)
@@ -222,10 +248,9 @@ class CoreMetadataInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> MetadataOutput:
"""Collects and outputs a CoreMetadata object"""

return MetadataOutput(
metadata=MetadataField.model_validate(
self.model_dump(exclude_none=True, exclude={"id", "type", "is_intermediate", "use_cache"})
)
)
as_dict = self.model_dump(exclude_none=True, exclude={"id", "type", "is_intermediate", "use_cache"})
as_dict["app_version"] = __version__

return MetadataOutput(metadata=MetadataField.model_validate(as_dict))

model_config = ConfigDict(extra="allow")

@@ -3,11 +3,11 @@ from typing import List, Optional

from pydantic import BaseModel, Field

from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.shared.models import FreeUConfig
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType

from ...backend.model_manager import SubModelType
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
@@ -16,33 +16,52 @@ from .baseinvocation import (
)

class ModelInfo(BaseModel):
key: str = Field(description="Key of model as returned by ModelRecordServiceBase.get_model()")
submodel_type: Optional[SubModelType] = Field(default=None, description="Info to load submodel")
class ModelIdentifierField(BaseModel):
key: str = Field(description="The model's unique key")
hash: str = Field(description="The model's BLAKE3 hash")
name: str = Field(description="The model's name")
base: BaseModelType = Field(description="The model's base model type")
type: ModelType = Field(description="The model's type")
submodel_type: Optional[SubModelType] = Field(
description="The submodel to load, if this is a main model", default=None
)

@classmethod
def from_config(
cls, config: "AnyModelConfig", submodel_type: Optional[SubModelType] = None
) -> "ModelIdentifierField":
return cls(
key=config.key,
hash=config.hash,
name=config.name,
base=config.base,
type=config.type,
submodel_type=submodel_type,
)

class LoraInfo(ModelInfo):
weight: float = Field(description="Lora's weight which to use when apply to model")
class LoRAField(BaseModel):
lora: ModelIdentifierField = Field(description="Info to load lora model")
weight: float = Field(description="Weight to apply to lora model")

class UNetField(BaseModel):
unet: ModelInfo = Field(description="Info to load unet submodel")
scheduler: ModelInfo = Field(description="Info to load scheduler submodel")
loras: List[LoraInfo] = Field(description="Loras to apply on model loading")
unet: ModelIdentifierField = Field(description="Info to load unet submodel")
scheduler: ModelIdentifierField = Field(description="Info to load scheduler submodel")
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')
freeu_config: Optional[FreeUConfig] = Field(default=None, description="FreeU configuration")

class ClipField(BaseModel):
tokenizer: ModelInfo = Field(description="Info to load tokenizer submodel")
text_encoder: ModelInfo = Field(description="Info to load text_encoder submodel")
class CLIPField(BaseModel):
tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
skipped_layers: int = Field(description="Number of skipped layers in text_encoder")
loras: List[LoraInfo] = Field(description="Loras to apply on model loading")
loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")

class VaeField(BaseModel):
# TODO: better naming?
vae: ModelInfo = Field(description="Info to load vae submodel")
class VAEField(BaseModel):
vae: ModelIdentifierField = Field(description="Info to load vae submodel")
seamless_axes: List[str] = Field(default_factory=list, description='Axes("x" and "y") to which apply seamless')

@@ -57,14 +76,14 @@ class UNetOutput(BaseInvocationOutput):
class VAEOutput(BaseInvocationOutput):
"""Base class for invocations that output a VAE field"""

vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")

@invocation_output("clip_output")
class CLIPOutput(BaseInvocationOutput):
"""Base class for invocations that output a CLIP field"""

clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP")
clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP")

@invocation_output("model_loader_output")
@@ -74,84 +93,54 @@ class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput):
pass

class MainModelField(BaseModel):
"""Main model field"""

key: str = Field(description="Model key")

class LoRAModelField(BaseModel):
"""LoRA model field"""

key: str = Field(description="LoRA model key")

@invocation(
"main_model_loader",
title="Main Model",
tags=["model"],
category="model",
version="1.0.1",
version="1.0.2",
)
class MainModelLoaderInvocation(BaseInvocation):
"""Loads a main model, outputting its submodels."""

model: MainModelField = InputField(description=FieldDescriptions.main_model, input=Input.Direct)
model: ModelIdentifierField = InputField(
description=FieldDescriptions.main_model, input=Input.Direct, ui_type=UIType.MainModel
)
# TODO: precision?

def invoke(self, context: InvocationContext) -> ModelLoaderOutput:
key = self.model.key

# TODO: not found exceptions
if not context.models.exists(key):
raise Exception(f"Unknown model {key}")
if not context.models.exists(self.model.key):
raise Exception(f"Unknown model {self.model.key}")

unet = self.model.model_copy(update={"submodel_type": SubModelType.UNet})
scheduler = self.model.model_copy(update={"submodel_type": SubModelType.Scheduler})
tokenizer = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
text_encoder = self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
vae = self.model.model_copy(update={"submodel_type": SubModelType.VAE})

return ModelLoaderOutput(
unet=UNetField(
unet=ModelInfo(
key=key,
submodel_type=SubModelType.UNet,
),
scheduler=ModelInfo(
key=key,
submodel_type=SubModelType.Scheduler,
),
loras=[],
),
clip=ClipField(
tokenizer=ModelInfo(
key=key,
submodel_type=SubModelType.Tokenizer,
),
text_encoder=ModelInfo(
key=key,
submodel_type=SubModelType.TextEncoder,
),
loras=[],
skipped_layers=0,
),
vae=VaeField(
vae=ModelInfo(
key=key,
submodel_type=SubModelType.VAE,
),
),
unet=UNetField(unet=unet, scheduler=scheduler, loras=[]),
clip=CLIPField(tokenizer=tokenizer, text_encoder=text_encoder, loras=[], skipped_layers=0),
vae=VAEField(vae=vae),
)

@invocation_output("lora_loader_output")
class LoraLoaderOutput(BaseInvocationOutput):
class LoRALoaderOutput(BaseInvocationOutput):
"""Model loader output"""

unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet")
clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")
clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")

@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.1")
class LoraLoaderInvocation(BaseInvocation):
@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.2")
class LoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""

lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA")
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(
default=None,
@@ -159,46 +148,41 @@ class LoraLoaderInvocation(BaseInvocation):
input=Input.Connection,
title="UNet",
)
clip: Optional[ClipField] = InputField(
clip: Optional[CLIPField] = InputField(
default=None,
description=FieldDescriptions.clip,
input=Input.Connection,
title="CLIP",
)

def invoke(self, context: InvocationContext) -> LoraLoaderOutput:
if self.lora is None:
raise Exception("No LoRA provided")

def invoke(self, context: InvocationContext) -> LoRALoaderOutput:
lora_key = self.lora.key

if not context.models.exists(lora_key):
raise Exception(f"Unkown lora: {lora_key}!")

if self.unet is not None and any(lora.key == lora_key for lora in self.unet.loras):
raise Exception(f'Lora "{lora_key}" already applied to unet')
if self.unet is not None and any(lora.lora.key == lora_key for lora in self.unet.loras):
raise Exception(f'LoRA "{lora_key}" already applied to unet')

if self.clip is not None and any(lora.key == lora_key for lora in self.clip.loras):
raise Exception(f'Lora "{lora_key}" already applied to clip')
if self.clip is not None and any(lora.lora.key == lora_key for lora in self.clip.loras):
raise Exception(f'LoRA "{lora_key}" already applied to clip')

output = LoraLoaderOutput()
output = LoRALoaderOutput()

if self.unet is not None:
output.unet = copy.deepcopy(self.unet)
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(
LoraInfo(
key=lora_key,
submodel_type=None,
LoRAField(
lora=self.lora,
weight=self.weight,
)
)

if self.clip is not None:
output.clip = copy.deepcopy(self.clip)
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(
LoraInfo(
key=lora_key,
submodel_type=None,
LoRAField(
lora=self.lora,
weight=self.weight,
)
)
@@ -207,12 +191,12 @@ class LoraLoaderInvocation(BaseInvocation):

@invocation_output("sdxl_lora_loader_output")
class SDXLLoraLoaderOutput(BaseInvocationOutput):
class SDXLLoRALoaderOutput(BaseInvocationOutput):
"""SDXL LoRA Loader Output"""

unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet")
clip: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 1")
clip2: Optional[ClipField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 2")
clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 1")
clip2: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP 2")

@invocation(
@@ -220,12 +204,14 @@ class SDXLLoraLoaderOutput(BaseInvocationOutput):
title="SDXL LoRA",
tags=["lora", "model"],
category="model",
version="1.0.1",
version="1.0.2",
)
class SDXLLoraLoaderInvocation(BaseInvocation):
class SDXLLoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""

lora: LoRAModelField = InputField(description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA")
lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(
default=None,
@@ -233,65 +219,59 @@ class SDXLLoraLoaderInvocation(BaseInvocation):
input=Input.Connection,
title="UNet",
)
clip: Optional[ClipField] = InputField(
clip: Optional[CLIPField] = InputField(
default=None,
description=FieldDescriptions.clip,
input=Input.Connection,
title="CLIP 1",
)
clip2: Optional[ClipField] = InputField(
clip2: Optional[CLIPField] = InputField(
default=None,
description=FieldDescriptions.clip,
input=Input.Connection,
title="CLIP 2",
)

def invoke(self, context: InvocationContext) -> SDXLLoraLoaderOutput:
if self.lora is None:
raise Exception("No LoRA provided")

def invoke(self, context: InvocationContext) -> SDXLLoRALoaderOutput:
lora_key = self.lora.key

if not context.models.exists(lora_key):
raise Exception(f"Unknown lora: {lora_key}!")

if self.unet is not None and any(lora.key == lora_key for lora in self.unet.loras):
raise Exception(f'Lora "{lora_key}" already applied to unet')
if self.unet is not None and any(lora.lora.key == lora_key for lora in self.unet.loras):
raise Exception(f'LoRA "{lora_key}" already applied to unet')

if self.clip is not None and any(lora.key == lora_key for lora in self.clip.loras):
raise Exception(f'Lora "{lora_key}" already applied to clip')
if self.clip is not None and any(lora.lora.key == lora_key for lora in self.clip.loras):
raise Exception(f'LoRA "{lora_key}" already applied to clip')

if self.clip2 is not None and any(lora.key == lora_key for lora in self.clip2.loras):
raise Exception(f'Lora "{lora_key}" already applied to clip2')
if self.clip2 is not None and any(lora.lora.key == lora_key for lora in self.clip2.loras):
raise Exception(f'LoRA "{lora_key}" already applied to clip2')

output = SDXLLoraLoaderOutput()
output = SDXLLoRALoaderOutput()

if self.unet is not None:
output.unet = copy.deepcopy(self.unet)
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(
LoraInfo(
key=lora_key,
submodel_type=None,
LoRAField(
lora=self.lora,
weight=self.weight,
)
)

if self.clip is not None:
output.clip = copy.deepcopy(self.clip)
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(
LoraInfo(
key=lora_key,
submodel_type=None,
LoRAField(
lora=self.lora,
weight=self.weight,
)
)

if self.clip2 is not None:
output.clip2 = copy.deepcopy(self.clip2)
output.clip2 = self.clip2.model_copy(deep=True)
output.clip2.loras.append(
LoraInfo(
key=lora_key,
submodel_type=None,
LoRAField(
lora=self.lora,
weight=self.weight,
)
)
@@ -299,20 +279,12 @@ class SDXLLoraLoaderInvocation(BaseInvocation):
return output

class VAEModelField(BaseModel):
"""Vae model field"""

key: str = Field(description="Model's key")

@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.1")
class VaeLoaderInvocation(BaseInvocation):
@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.2")
class VAELoaderInvocation(BaseInvocation):
"""Loads a VAE model, outputting a VaeLoaderOutput"""

vae_model: VAEModelField = InputField(
description=FieldDescriptions.vae_model,
input=Input.Direct,
title="VAE",
vae_model: ModelIdentifierField = InputField(
description=FieldDescriptions.vae_model, input=Input.Direct, title="VAE", ui_type=UIType.VAEModel
)

def invoke(self, context: InvocationContext) -> VAEOutput:
@@ -321,7 +293,7 @@ class VaeLoaderInvocation(BaseInvocation):
if not context.models.exists(key):
raise Exception(f"Unkown vae: {key}!")

return VAEOutput(vae=VaeField(vae=ModelInfo(key=key)))
return VAEOutput(vae=VAEField(vae=self.vae_model))

@invocation_output("seamless_output")
@@ -329,7 +301,7 @@ class SeamlessModeOutput(BaseInvocationOutput):
"""Modified Seamless Model output"""

unet: Optional[UNetField] = OutputField(default=None, description=FieldDescriptions.unet, title="UNet")
vae: Optional[VaeField] = OutputField(default=None, description=FieldDescriptions.vae, title="VAE")
vae: Optional[VAEField] = OutputField(default=None, description=FieldDescriptions.vae, title="VAE")

@invocation(
@@ -337,7 +309,7 @@ class SeamlessModeOutput(BaseInvocationOutput):
title="Seamless",
tags=["seamless", "model"],
category="model",
version="1.0.0",
version="1.0.1",
)
class SeamlessModeInvocation(BaseInvocation):
"""Applies the seamless transformation to the Model UNet and VAE."""
@@ -348,7 +320,7 @@ class SeamlessModeInvocation(BaseInvocation):
input=Input.Connection,
title="UNet",
)
vae: Optional[VaeField] = InputField(
vae: Optional[VAEField] = InputField(
default=None,
description=FieldDescriptions.vae_model,
input=Input.Connection,
@@ -377,7 +349,7 @@ class SeamlessModeInvocation(BaseInvocation):
return SeamlessModeOutput(unet=unet, vae=vae)

@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.0")
@invocation("freeu", title="FreeU", tags=["freeu"], category="unet", version="1.0.1")
class FreeUInvocation(BaseInvocation):
"""
Applies FreeU to the UNet. Suggested values (b1/b2/s1/s2):

@@ -81,7 +81,7 @@ class NoiseOutput(BaseInvocationOutput):
title="Noise",
tags=["latents", "noise"],
category="latents",
version="1.0.1",
version="1.0.2",
)
class NoiseInvocation(BaseInvocation):
"""Generates latent noise."""

@@ -51,7 +51,7 @@ from .fields import InputField
title="Float Range",
tags=["math", "range"],
category="math",
version="1.0.0",
version="1.0.1",
)
class FloatLinearRangeInvocation(BaseInvocation):
"""Creates a range"""
@@ -111,7 +111,7 @@ EASING_FUNCTION_KEYS = Literal[tuple(EASING_FUNCTIONS_MAP.keys())]
title="Step Param Easing",
tags=["step", "easing"],
category="step",
version="1.0.1",
version="1.0.2",
)
class StepParamEasingInvocation(BaseInvocation):
"""Experimental per-step parameter easing for denoising steps"""

@@ -54,7 +54,7 @@ class BooleanCollectionOutput(BaseInvocationOutput):

@invocation(
"boolean", title="Boolean Primitive", tags=["primitives", "boolean"], category="primitives", version="1.0.0"
"boolean", title="Boolean Primitive", tags=["primitives", "boolean"], category="primitives", version="1.0.1"
)
class BooleanInvocation(BaseInvocation):
"""A boolean primitive value"""
@@ -70,7 +70,7 @@ class BooleanInvocation(BaseInvocation):
title="Boolean Collection Primitive",
tags=["primitives", "boolean", "collection"],
category="primitives",
version="1.0.1",
version="1.0.2",
)
class BooleanCollectionInvocation(BaseInvocation):
"""A collection of boolean primitive values"""
@@ -103,7 +103,7 @@ class IntegerCollectionOutput(BaseInvocationOutput):

@invocation(
"integer", title="Integer Primitive", tags=["primitives", "integer"], category="primitives", version="1.0.0"
"integer", title="Integer Primitive", tags=["primitives", "integer"], category="primitives", version="1.0.1"
)
class IntegerInvocation(BaseInvocation):
"""An integer primitive value"""
@@ -119,7 +119,7 @@ class IntegerInvocation(BaseInvocation):
title="Integer Collection Primitive",
tags=["primitives", "integer", "collection"],
category="primitives",
version="1.0.1",
version="1.0.2",
)
class IntegerCollectionInvocation(BaseInvocation):
"""A collection of integer primitive values"""
@@ -151,7 +151,7 @@ class FloatCollectionOutput(BaseInvocationOutput):
)

@invocation("float", title="Float Primitive", tags=["primitives", "float"], category="primitives", version="1.0.0")
@invocation("float", title="Float Primitive", tags=["primitives", "float"], category="primitives", version="1.0.1")
class FloatInvocation(BaseInvocation):
"""A float primitive value"""

@@ -166,7 +166,7 @@ class FloatInvocation(BaseInvocation):
title="Float Collection Primitive",
tags=["primitives", "float", "collection"],
category="primitives",
version="1.0.1",
version="1.0.2",
)
class FloatCollectionInvocation(BaseInvocation):
"""A collection of float primitive values"""
@@ -198,7 +198,7 @@ class StringCollectionOutput(BaseInvocationOutput):
)

@invocation("string", title="String Primitive", tags=["primitives", "string"], category="primitives", version="1.0.0")
@invocation("string", title="String Primitive", tags=["primitives", "string"], category="primitives", version="1.0.1")
class StringInvocation(BaseInvocation):
"""A string primitive value"""

@@ -213,7 +213,7 @@ class StringInvocation(BaseInvocation):
title="String Collection Primitive",
tags=["primitives", "string", "collection"],
category="primitives",
version="1.0.1",
version="1.0.2",
)
class StringCollectionInvocation(BaseInvocation):
"""A collection of string primitive values"""
@@ -255,7 +255,7 @@ class ImageCollectionOutput(BaseInvocationOutput):
)

@invocation("image", title="Image Primitive", tags=["primitives", "image"], category="primitives", version="1.0.1")
@invocation("image", title="Image Primitive", tags=["primitives", "image"], category="primitives", version="1.0.2")
class ImageInvocation(BaseInvocation):
"""An image primitive value"""

@@ -276,7 +276,7 @@ class ImageInvocation(BaseInvocation):
title="Image Collection Primitive",
tags=["primitives", "image", "collection"],
category="primitives",
version="1.0.0",
version="1.0.1",
)
class ImageCollectionInvocation(BaseInvocation):
"""A collection of image primitive values"""
@@ -341,7 +341,7 @@ class LatentsCollectionOutput(BaseInvocationOutput):

@invocation(
"latents", title="Latents Primitive", tags=["primitives", "latents"], category="primitives", version="1.0.1"
"latents", title="Latents Primitive", tags=["primitives", "latents"], category="primitives", version="1.0.2"
)
class LatentsInvocation(BaseInvocation):
"""A latents tensor primitive value"""
@@ -359,7 +359,7 @@ class LatentsInvocation(BaseInvocation):
title="Latents Collection Primitive",
tags=["primitives", "latents", "collection"],
category="primitives",
version="1.0.0",
version="1.0.1",
)
class LatentsCollectionInvocation(BaseInvocation):
"""A collection of latents tensor primitive values"""
@@ -393,7 +393,7 @@ class ColorCollectionOutput(BaseInvocationOutput):
)

@invocation("color", title="Color Primitive", tags=["primitives", "color"], category="primitives", version="1.0.0")
@invocation("color", title="Color Primitive", tags=["primitives", "color"], category="primitives", version="1.0.1")
class ColorInvocation(BaseInvocation):
"""A color primitive value"""

@@ -433,7 +433,7 @@ class ConditioningCollectionOutput(BaseInvocationOutput):
title="Conditioning Primitive",
tags=["primitives", "conditioning"],
category="primitives",
version="1.0.0",
version="1.0.1",
)
class ConditioningInvocation(BaseInvocation):
"""A conditioning tensor primitive value"""
@@ -449,7 +449,7 @@ class ConditioningInvocation(BaseInvocation):
title="Conditioning Collection Primitive",
tags=["primitives", "conditioning", "collection"],
category="primitives",
version="1.0.1",
version="1.0.2",
)
class ConditioningCollectionInvocation(BaseInvocation):
"""A collection of conditioning tensor primitive values"""

@ -17,7 +17,7 @@ from .fields import InputField, UIComponent
|
||||
title="Dynamic Prompt",
|
||||
tags=["prompt", "collection"],
|
||||
category="prompt",
|
||||
version="1.0.0",
|
||||
version="1.0.1",
|
||||
use_cache=False,
|
||||
)
|
||||
class DynamicPromptInvocation(BaseInvocation):
|
||||
@ -46,7 +46,7 @@ class DynamicPromptInvocation(BaseInvocation):
|
||||
title="Prompts from File",
|
||||
tags=["prompt", "file"],
|
||||
category="prompt",
|
||||
version="1.0.1",
|
||||
version="1.0.2",
|
||||
)
|
||||
class PromptsFromFileInvocation(BaseInvocation):
|
||||
"""Loads prompts from a text file"""
|
||||
|
@@ -8,7 +8,7 @@ from .baseinvocation import (
     invocation,
     invocation_output,
 )
-from .model import ClipField, MainModelField, ModelInfo, UNetField, VaeField
+from .model import CLIPField, ModelIdentifierField, UNetField, VAEField


 @invocation_output("sdxl_model_loader_output")
@@ -16,9 +16,9 @@ class SDXLModelLoaderOutput(BaseInvocationOutput):
     """SDXL base model loader output"""

     unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
-    clip: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP 1")
-    clip2: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP 2")
-    vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
+    clip: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP 1")
+    clip2: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP 2")
+    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")


 @invocation_output("sdxl_refiner_model_loader_output")
@@ -26,15 +26,15 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput):
     """SDXL refiner model loader output"""

     unet: UNetField = OutputField(description=FieldDescriptions.unet, title="UNet")
-    clip2: ClipField = OutputField(description=FieldDescriptions.clip, title="CLIP 2")
-    vae: VaeField = OutputField(description=FieldDescriptions.vae, title="VAE")
+    clip2: CLIPField = OutputField(description=FieldDescriptions.clip, title="CLIP 2")
+    vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")


-@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.1")
+@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.2")
 class SDXLModelLoaderInvocation(BaseInvocation):
     """Loads an sdxl base model, outputting its submodels."""

-    model: MainModelField = InputField(
+    model: ModelIdentifierField = InputField(
         description=FieldDescriptions.sdxl_main_model, input=Input.Direct, ui_type=UIType.SDXLMainModel
     )
     # TODO: precision?
@@ -46,48 +46,19 @@ class SDXLModelLoaderInvocation(BaseInvocation):
         if not context.models.exists(model_key):
             raise Exception(f"Unknown model: {model_key}")

+        unet = self.model.model_copy(update={"submodel_type": SubModelType.UNet})
+        scheduler = self.model.model_copy(update={"submodel_type": SubModelType.Scheduler})
+        tokenizer = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer})
+        text_encoder = self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder})
+        tokenizer2 = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
+        text_encoder2 = self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
+        vae = self.model.model_copy(update={"submodel_type": SubModelType.VAE})
+
         return SDXLModelLoaderOutput(
-            unet=UNetField(
-                unet=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.UNet,
-                ),
-                scheduler=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.Scheduler,
-                ),
-                loras=[],
-            ),
-            clip=ClipField(
-                tokenizer=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.Tokenizer,
-                ),
-                text_encoder=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.TextEncoder,
-                ),
-                loras=[],
-                skipped_layers=0,
-            ),
-            clip2=ClipField(
-                tokenizer=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.Tokenizer2,
-                ),
-                text_encoder=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.TextEncoder2,
-                ),
-                loras=[],
-                skipped_layers=0,
-            ),
-            vae=VaeField(
-                vae=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.VAE,
-                ),
-            ),
+            unet=UNetField(unet=unet, scheduler=scheduler, loras=[]),
+            clip=CLIPField(tokenizer=tokenizer, text_encoder=text_encoder, loras=[], skipped_layers=0),
+            clip2=CLIPField(tokenizer=tokenizer2, text_encoder=text_encoder2, loras=[], skipped_layers=0),
+            vae=VAEField(vae=vae),
         )

@@ -96,15 +67,13 @@ class SDXLModelLoaderInvocation(BaseInvocation):
     title="SDXL Refiner Model",
     tags=["model", "sdxl", "refiner"],
     category="model",
-    version="1.0.1",
+    version="1.0.2",
 )
 class SDXLRefinerModelLoaderInvocation(BaseInvocation):
     """Loads an sdxl refiner model, outputting its submodels."""

-    model: MainModelField = InputField(
-        description=FieldDescriptions.sdxl_refiner_model,
-        input=Input.Direct,
-        ui_type=UIType.SDXLRefinerModel,
+    model: ModelIdentifierField = InputField(
+        description=FieldDescriptions.sdxl_refiner_model, input=Input.Direct, ui_type=UIType.SDXLRefinerModel
     )
     # TODO: precision?
@@ -115,34 +84,14 @@ class SDXLRefinerModelLoaderInvocation(BaseInvocation):
         if not context.models.exists(model_key):
             raise Exception(f"Unknown model: {model_key}")

+        unet = self.model.model_copy(update={"submodel_type": SubModelType.UNet})
+        scheduler = self.model.model_copy(update={"submodel_type": SubModelType.Scheduler})
+        tokenizer2 = self.model.model_copy(update={"submodel_type": SubModelType.Tokenizer2})
+        text_encoder2 = self.model.model_copy(update={"submodel_type": SubModelType.TextEncoder2})
+        vae = self.model.model_copy(update={"submodel_type": SubModelType.VAE})
+
         return SDXLRefinerModelLoaderOutput(
-            unet=UNetField(
-                unet=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.UNet,
-                ),
-                scheduler=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.Scheduler,
-                ),
-                loras=[],
-            ),
-            clip2=ClipField(
-                tokenizer=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.Tokenizer2,
-                ),
-                text_encoder=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.TextEncoder2,
-                ),
-                loras=[],
-                skipped_layers=0,
-            ),
-            vae=VaeField(
-                vae=ModelInfo(
-                    key=model_key,
-                    submodel_type=SubModelType.VAE,
-                ),
-            ),
+            unet=UNetField(unet=unet, scheduler=scheduler, loras=[]),
+            clip2=CLIPField(tokenizer=tokenizer2, text_encoder=text_encoder2, loras=[], skipped_layers=0),
+            vae=VAEField(vae=vae),
         )
|
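The two SDXL loader hunks above replace the old nested ModelInfo construction with pydantic `model_copy` calls on the incoming `ModelIdentifierField`: the identifier is cloned once per submodel, overriding only `submodel_type`. Below is a minimal, self-contained sketch of that pattern; the `ModelIdentifier` class and its fields are simplified stand-ins chosen for illustration, not the project's exact schema.

from enum import Enum
from typing import Optional

from pydantic import BaseModel


class SubModelType(str, Enum):
    UNet = "unet"
    Tokenizer = "tokenizer"
    TextEncoder = "text_encoder"
    VAE = "vae"


class ModelIdentifier(BaseModel):
    # Simplified stand-in for ModelIdentifierField: one record key plus an optional submodel selector.
    key: str
    submodel_type: Optional[SubModelType] = None


base = ModelIdentifier(key="abc123")

# model_copy(update=...) is pydantic v2: it clones the identifier and overrides only submodel_type,
# so every submodel reference still points at the same underlying model record.
unet = base.model_copy(update={"submodel_type": SubModelType.UNet})
vae = base.model_copy(update={"submodel_type": SubModelType.VAE})

assert unet.key == vae.key == "abc123"
assert unet.submodel_type is SubModelType.UNet

The design point is that the loader no longer needs a separate ModelInfo object per submodel; one identifier plus a submodel tag is enough for the model manager to resolve the right weights.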
@ -27,7 +27,7 @@ class StringPosNegOutput(BaseInvocationOutput):
|
||||
title="String Split Negative",
|
||||
tags=["string", "split", "negative"],
|
||||
category="string",
|
||||
version="1.0.0",
|
||||
version="1.0.1",
|
||||
)
|
||||
class StringSplitNegInvocation(BaseInvocation):
|
||||
"""Splits string into two strings, inside [] goes into negative string everthing else goes into positive string. Each [ and ] character is replaced with a space"""
|
||||
@ -69,7 +69,7 @@ class String2Output(BaseInvocationOutput):
|
||||
string_2: str = OutputField(description="string 2")
|
||||
|
||||
|
||||
@invocation("string_split", title="String Split", tags=["string", "split"], category="string", version="1.0.0")
|
||||
@invocation("string_split", title="String Split", tags=["string", "split"], category="string", version="1.0.1")
|
||||
class StringSplitInvocation(BaseInvocation):
|
||||
"""Splits string into two strings, based on the first occurance of the delimiter. The delimiter will be removed from the string"""
|
||||
|
||||
@ -89,7 +89,7 @@ class StringSplitInvocation(BaseInvocation):
|
||||
return String2Output(string_1=part1, string_2=part2)
|
||||
|
||||
|
||||
@invocation("string_join", title="String Join", tags=["string", "join"], category="string", version="1.0.0")
|
||||
@invocation("string_join", title="String Join", tags=["string", "join"], category="string", version="1.0.1")
|
||||
class StringJoinInvocation(BaseInvocation):
|
||||
"""Joins string left to string right"""
|
||||
|
||||
@ -100,7 +100,7 @@ class StringJoinInvocation(BaseInvocation):
|
||||
return StringOutput(value=((self.string_left or "") + (self.string_right or "")))
|
||||
|
||||
|
||||
@invocation("string_join_three", title="String Join Three", tags=["string", "join"], category="string", version="1.0.0")
|
||||
@invocation("string_join_three", title="String Join Three", tags=["string", "join"], category="string", version="1.0.1")
|
||||
class StringJoinThreeInvocation(BaseInvocation):
|
||||
"""Joins string left to string middle to string right"""
|
||||
|
||||
@ -113,7 +113,7 @@ class StringJoinThreeInvocation(BaseInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"string_replace", title="String Replace", tags=["string", "replace", "regex"], category="string", version="1.0.0"
|
||||
"string_replace", title="String Replace", tags=["string", "replace", "regex"], category="string", version="1.0.1"
|
||||
)
|
||||
class StringReplaceInvocation(BaseInvocation):
|
||||
"""Replaces the search string with the replace string"""
|
||||
|
@@ -9,18 +9,15 @@ from invokeai.app.invocations.baseinvocation import (
     invocation_output,
 )
 from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
-from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField
+from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
+from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
 from invokeai.app.services.shared.invocation_context import InvocationContext


-class T2IAdapterModelField(BaseModel):
-    key: str = Field(description="Model record key for the T2I-Adapter model")
-
-
 class T2IAdapterField(BaseModel):
     image: ImageField = Field(description="The T2I-Adapter image prompt.")
-    t2i_adapter_model: T2IAdapterModelField = Field(description="The T2I-Adapter model to use.")
+    t2i_adapter_model: ModelIdentifierField = Field(description="The T2I-Adapter model to use.")
     weight: Union[float, list[float]] = Field(default=1, description="The weight given to the T2I-Adapter")
     begin_step_percent: float = Field(
         default=0, ge=0, le=1, description="When the T2I-Adapter is first applied (% of total steps)"
@@ -48,18 +45,19 @@ class T2IAdapterOutput(BaseInvocationOutput):


 @invocation(
-    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.1"
+    "t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.2"
 )
 class T2IAdapterInvocation(BaseInvocation):
     """Collects T2I-Adapter info to pass to other nodes."""

     # Inputs
     image: ImageField = InputField(description="The IP-Adapter image prompt.")
-    t2i_adapter_model: T2IAdapterModelField = InputField(
+    t2i_adapter_model: ModelIdentifierField = InputField(
         description="The T2I-Adapter model.",
         title="T2I-Adapter Model",
         input=Input.Direct,
         ui_order=-1,
+        ui_type=UIType.T2IAdapterModel,
     )
     weight: Union[float, list[float]] = InputField(
         default=1, ge=0, description="The weight given to the T2I-Adapter", title="Weight"
|
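With `T2IAdapterModelField` removed, the T2I-Adapter data now carries the same generic model identifier used by the other invocations. A rough sketch of what the collected structure looks like after the change; `ImageRef` and `ModelRef` are simplified stand-ins with illustrative field names, not the repo's real classes.

from typing import Union

from pydantic import BaseModel, Field


class ImageRef(BaseModel):
    # Stand-in for ImageField: the name of an image known to the app.
    image_name: str


class ModelRef(BaseModel):
    # Stand-in for ModelIdentifierField: just the model record key.
    key: str


class T2IAdapterData(BaseModel):
    # Mirrors the shape of T2IAdapterField after the change: the model is a plain identifier.
    image: ImageRef
    t2i_adapter_model: ModelRef
    weight: Union[float, list[float]] = Field(default=1)
    begin_step_percent: float = Field(default=0, ge=0, le=1)


data = T2IAdapterData(
    image=ImageRef(image_name="t2i_hint.png"),
    t2i_adapter_model=ModelRef(key="sketch-adapter-key"),
    weight=[0.8, 0.6],
)
print(data.model_dump())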
@ -39,7 +39,7 @@ class CalculateImageTilesOutput(BaseInvocationOutput):
|
||||
title="Calculate Image Tiles",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
version="1.0.1",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class CalculateImageTilesInvocation(BaseInvocation):
|
||||
@ -73,7 +73,7 @@ class CalculateImageTilesInvocation(BaseInvocation):
|
||||
title="Calculate Image Tiles Even Split",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.1.0",
|
||||
version="1.1.1",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
|
||||
@ -116,7 +116,7 @@ class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
|
||||
title="Calculate Image Tiles Minimum Overlap",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
version="1.0.1",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class CalculateImageTilesMinimumOverlapInvocation(BaseInvocation):
|
||||
@ -167,7 +167,7 @@ class TileToPropertiesOutput(BaseInvocationOutput):
|
||||
title="Tile to Properties",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
version="1.0.1",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class TileToPropertiesInvocation(BaseInvocation):
|
||||
@ -200,7 +200,7 @@ class PairTileImageOutput(BaseInvocationOutput):
|
||||
title="Pair Tile with Image",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.0.0",
|
||||
version="1.0.1",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class PairTileImageInvocation(BaseInvocation):
|
||||
@ -229,7 +229,7 @@ BLEND_MODES = Literal["Linear", "Seam"]
|
||||
title="Merge Tiles to Image",
|
||||
tags=["tiles"],
|
||||
category="tiles",
|
||||
version="1.1.0",
|
||||
version="1.1.1",
|
||||
classification=Classification.Beta,
|
||||
)
|
||||
class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
|
@ -11,6 +11,7 @@ from pydantic import ConfigDict
|
||||
from invokeai.app.invocations.fields import ImageField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.download_with_progress import download_with_progress_bar
|
||||
from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
|
||||
from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
|
||||
from invokeai.backend.util.devices import choose_torch_device
|
||||
@ -27,11 +28,18 @@ ESRGAN_MODELS = Literal[
|
||||
"RealESRGAN_x2plus.pth",
|
||||
]
|
||||
|
||||
ESRGAN_MODEL_URLS: dict[str, str] = {
|
||||
"RealESRGAN_x4plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
|
||||
"RealESRGAN_x4plus_anime_6B.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
|
||||
"ESRGAN_SRx4_DF2KOST_official.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
|
||||
"RealESRGAN_x2plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
|
||||
}
|
||||
|
||||
if choose_torch_device() == torch.device("mps"):
|
||||
from torch import mps
|
||||
|
||||
|
||||
@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.3.1")
|
||||
@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.3.2")
|
||||
class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
"""Upscales an image using RealESRGAN."""
|
||||
|
||||
@ -45,7 +53,6 @@ class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.images.get_pil(self.image.image_name)
|
||||
models_path = context.config.get().models_path
|
||||
|
||||
rrdbnet_model = None
|
||||
netscale = None
|
||||
@ -92,11 +99,16 @@ class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
context.logger.error(msg)
|
||||
raise ValueError(msg)
|
||||
|
||||
esrgan_model_path = Path(f"core/upscaling/realesrgan/{self.model_name}")
|
||||
esrgan_model_path = Path(context.config.get().models_path, f"core/upscaling/realesrgan/{self.model_name}")
|
||||
|
||||
# Downloads the ESRGAN model if it doesn't already exist
|
||||
download_with_progress_bar(
|
||||
name=self.model_name, url=ESRGAN_MODEL_URLS[self.model_name], dest_path=esrgan_model_path
|
||||
)
|
||||
|
||||
upscaler = RealESRGAN(
|
||||
scale=netscale,
|
||||
model_path=models_path / esrgan_model_path,
|
||||
model_path=esrgan_model_path,
|
||||
model=rrdbnet_model,
|
||||
half=False,
|
||||
tile=self.tile_size,
|
||||
|
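The upscale hunk above resolves the RealESRGAN weights to a full path under `models_path` and fetches them on demand with `download_with_progress_bar` before constructing the upscaler. Here is a hedged, standard-library-only sketch of the same resolve-then-download-if-missing flow; the example paths and model name are placeholders, and the real code uses the app's download helper rather than `urlretrieve`.

from pathlib import Path
from urllib.request import urlretrieve

# Example values; the real code takes model_name from the invocation and models_path from the app config.
models_path = Path("~/invokeai/models").expanduser()
model_name = "RealESRGAN_x4plus.pth"
url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth"

# Full destination path, matching the "core/upscaling/realesrgan/<model>" layout shown in the diff.
esrgan_model_path = models_path / "core/upscaling/realesrgan" / model_name

if not esrgan_model_path.exists():
    esrgan_model_path.parent.mkdir(parents=True, exist_ok=True)
    urlretrieve(url, esrgan_model_path)  # the app uses download_with_progress_bar(name=..., url=..., dest_path=...)

print(f"Using ESRGAN weights at {esrgan_model_path}")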
invokeai/app/run_app.py (new file, 12 lines)

@@ -0,0 +1,12 @@
+"""This is a wrapper around the main app entrypoint, to allow for CLI args to be parsed before running the app."""
+
+
+def run_app() -> None:
+    # Before doing _anything_, parse CLI args!
+    from invokeai.frontend.cli.arg_parser import InvokeAIArgs
+
+    InvokeAIArgs.parse_args()
+
+    from invokeai.app.api_app import invoke_api
+
+    invoke_api()
|
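The wrapper's whole purpose is ordering: parse CLI arguments first, then import the (heavy) API app. A minimal sketch of how such a wrapper is typically invoked as a script entry point; this is an illustrative snippet, not the repo's actual launcher.

# Hypothetical launcher script: defer all heavy imports to run_app(), which parses args first.
if __name__ == "__main__":
    from invokeai.app.run_app import run_app

    run_app()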
@@ -2,6 +2,6 @@

 from invokeai.app.services.config.config_common import PagingArgumentParser

-from .config_default import InvokeAIAppConfig, get_invokeai_config
+from .config_default import InvokeAIAppConfig, get_config

-__all__ = ["InvokeAIAppConfig", "get_invokeai_config", "PagingArgumentParser"]
+__all__ = ["InvokeAIAppConfig", "get_config", "PagingArgumentParser"]
@ -1,224 +0,0 @@
|
||||
# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team
|
||||
|
||||
"""
|
||||
Base class for the InvokeAI configuration system.
|
||||
It defines a type of pydantic BaseSettings object that
|
||||
is able to read and write from an omegaconf-based config file,
|
||||
with overriding of settings from environment variables and/or
|
||||
the command line.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
from argparse import ArgumentParser
|
||||
from pathlib import Path
|
||||
from typing import Any, ClassVar, Dict, List, Literal, Optional, Union, get_args, get_origin, get_type_hints
|
||||
|
||||
from omegaconf import DictConfig, ListConfig, OmegaConf
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
from invokeai.app.services.config.config_common import PagingArgumentParser, int_or_float_or_str
|
||||
|
||||
|
||||
class InvokeAISettings(BaseSettings):
|
||||
"""Runtime configuration settings in which default values are read from an omegaconf .yaml file."""
|
||||
|
||||
initconf: ClassVar[Optional[DictConfig]] = None
|
||||
argparse_groups: ClassVar[Dict[str, Any]] = {}
|
||||
|
||||
model_config = SettingsConfigDict(env_file_encoding="utf-8", arbitrary_types_allowed=True, case_sensitive=True)
|
||||
|
||||
def parse_args(self, argv: Optional[List[str]] = sys.argv[1:]) -> None:
|
||||
"""Call to parse command-line arguments."""
|
||||
parser = self.get_parser()
|
||||
opt, unknown_opts = parser.parse_known_args(argv)
|
||||
if len(unknown_opts) > 0:
|
||||
print("Unknown args:", unknown_opts)
|
||||
for name in self.model_fields:
|
||||
if name not in self._excluded():
|
||||
value = getattr(opt, name)
|
||||
if isinstance(value, ListConfig):
|
||||
value = list(value)
|
||||
elif isinstance(value, DictConfig):
|
||||
value = dict(value)
|
||||
setattr(self, name, value)
|
||||
|
||||
def to_yaml(self) -> str:
|
||||
"""Return a YAML string representing our settings. This can be used as the contents of `invokeai.yaml` to restore settings later."""
|
||||
cls = self.__class__
|
||||
type = get_args(get_type_hints(cls)["type"])[0]
|
||||
field_dict: Dict[str, Dict[str, Any]] = {type: {}}
|
||||
for name, field in self.model_fields.items():
|
||||
if name in cls._excluded_from_yaml():
|
||||
continue
|
||||
assert isinstance(field.json_schema_extra, dict)
|
||||
category = (
|
||||
field.json_schema_extra.get("category", "Uncategorized") if field.json_schema_extra else "Uncategorized"
|
||||
)
|
||||
value = getattr(self, name)
|
||||
assert isinstance(category, str)
|
||||
if category not in field_dict[type]:
|
||||
field_dict[type][category] = {}
|
||||
# keep paths as strings to make it easier to read
|
||||
field_dict[type][category][name] = str(value) if isinstance(value, Path) else value
|
||||
conf = OmegaConf.create(field_dict)
|
||||
return OmegaConf.to_yaml(conf)
|
||||
|
||||
@classmethod
|
||||
def add_parser_arguments(cls, parser: ArgumentParser) -> None:
|
||||
"""Dynamically create arguments for a settings parser."""
|
||||
if "type" in get_type_hints(cls):
|
||||
settings_stanza = get_args(get_type_hints(cls)["type"])[0]
|
||||
else:
|
||||
settings_stanza = "Uncategorized"
|
||||
|
||||
env_prefix = getattr(cls.model_config, "env_prefix", None)
|
||||
env_prefix = env_prefix if env_prefix is not None else settings_stanza.upper()
|
||||
|
||||
initconf = (
|
||||
cls.initconf.get(settings_stanza)
|
||||
if cls.initconf and settings_stanza in cls.initconf
|
||||
else OmegaConf.create()
|
||||
)
|
||||
|
||||
# create an upcase version of the environment in
|
||||
# order to achieve case-insensitive environment
|
||||
# variables (the way Windows does)
|
||||
upcase_environ = {}
|
||||
for key, value in os.environ.items():
|
||||
upcase_environ[key.upper()] = value
|
||||
|
||||
fields = cls.model_fields
|
||||
cls.argparse_groups = {}
|
||||
|
||||
for name, field in fields.items():
|
||||
if name not in cls._excluded():
|
||||
current_default = field.default
|
||||
|
||||
category = (
|
||||
field.json_schema_extra.get("category", "Uncategorized")
|
||||
if field.json_schema_extra
|
||||
else "Uncategorized"
|
||||
)
|
||||
env_name = env_prefix + "_" + name
|
||||
if category in initconf and name in initconf.get(category):
|
||||
field.default = initconf.get(category).get(name)
|
||||
if env_name.upper() in upcase_environ:
|
||||
field.default = upcase_environ[env_name.upper()]
|
||||
cls.add_field_argument(parser, name, field)
|
||||
|
||||
field.default = current_default
|
||||
|
||||
@classmethod
|
||||
def cmd_name(cls, command_field: str = "type") -> str:
|
||||
"""Return the category of a setting."""
|
||||
hints = get_type_hints(cls)
|
||||
if command_field in hints:
|
||||
result: str = get_args(hints[command_field])[0]
|
||||
return result
|
||||
else:
|
||||
return "Uncategorized"
|
||||
|
||||
@classmethod
|
||||
def get_parser(cls) -> ArgumentParser:
|
||||
"""Get the command-line parser for a setting."""
|
||||
parser = PagingArgumentParser(
|
||||
prog=cls.cmd_name(),
|
||||
description=cls.__doc__,
|
||||
)
|
||||
cls.add_parser_arguments(parser)
|
||||
return parser
|
||||
|
||||
@classmethod
|
||||
def _excluded(cls) -> List[str]:
|
||||
# internal fields that shouldn't be exposed as command line options
|
||||
return ["type", "initconf"]
|
||||
|
||||
@classmethod
|
||||
def _excluded_from_yaml(cls) -> List[str]:
|
||||
# combination of deprecated parameters and internal ones that shouldn't be exposed as invokeai.yaml options
|
||||
return [
|
||||
"type",
|
||||
"initconf",
|
||||
"version",
|
||||
"from_file",
|
||||
"model",
|
||||
"root",
|
||||
"max_cache_size",
|
||||
"max_vram_cache_size",
|
||||
"always_use_cpu",
|
||||
"free_gpu_mem",
|
||||
"xformers_enabled",
|
||||
"tiled_decode",
|
||||
"lora_dir",
|
||||
"embedding_dir",
|
||||
"controlnet_dir",
|
||||
"conf_path",
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def add_field_argument(cls, command_parser, name: str, field, default_override=None) -> None:
|
||||
"""Add the argparse arguments for a setting parser."""
|
||||
field_type = get_type_hints(cls).get(name)
|
||||
default = (
|
||||
default_override
|
||||
if default_override is not None
|
||||
else field.default
|
||||
if field.default_factory is None
|
||||
else field.default_factory()
|
||||
)
|
||||
if category := (field.json_schema_extra.get("category", None) if field.json_schema_extra else None):
|
||||
if category not in cls.argparse_groups:
|
||||
cls.argparse_groups[category] = command_parser.add_argument_group(category)
|
||||
argparse_group = cls.argparse_groups[category]
|
||||
else:
|
||||
argparse_group = command_parser
|
||||
|
||||
if get_origin(field_type) == Literal:
|
||||
allowed_values = get_args(field.annotation)
|
||||
allowed_types = set()
|
||||
for val in allowed_values:
|
||||
allowed_types.add(type(val))
|
||||
allowed_types_list = list(allowed_types)
|
||||
field_type = allowed_types_list[0] if len(allowed_types) == 1 else int_or_float_or_str
|
||||
|
||||
argparse_group.add_argument(
|
||||
f"--{name}",
|
||||
dest=name,
|
||||
type=field_type,
|
||||
default=default,
|
||||
choices=allowed_values,
|
||||
help=field.description,
|
||||
)
|
||||
|
||||
elif get_origin(field_type) == Union:
|
||||
argparse_group.add_argument(
|
||||
f"--{name}",
|
||||
dest=name,
|
||||
type=int_or_float_or_str,
|
||||
default=default,
|
||||
help=field.description,
|
||||
)
|
||||
|
||||
elif get_origin(field_type) == list:
|
||||
argparse_group.add_argument(
|
||||
f"--{name}",
|
||||
dest=name,
|
||||
nargs="*",
|
||||
type=field.annotation,
|
||||
default=default,
|
||||
action=argparse.BooleanOptionalAction if field.annotation == bool else "store",
|
||||
help=field.description,
|
||||
)
|
||||
else:
|
||||
argparse_group.add_argument(
|
||||
f"--{name}",
|
||||
dest=name,
|
||||
type=field.annotation,
|
||||
default=default,
|
||||
action=argparse.BooleanOptionalAction if field.annotation == bool else "store",
|
||||
help=field.description,
|
||||
)
|
@@ -12,7 +12,6 @@ from __future__ import annotations

 import argparse
 import pydoc
-from typing import Union


 class PagingArgumentParser(argparse.ArgumentParser):
@@ -24,18 +23,3 @@ class PagingArgumentParser(argparse.ArgumentParser):
     def print_help(self, file=None) -> None:
         text = self.format_help()
         pydoc.pager(text)
-
-
-def int_or_float_or_str(value: str) -> Union[int, float, str]:
-    """
-    Workaround for argparse type checking.
-    """
-    try:
-        return int(value)
-    except Exception as e: # noqa F841
-        pass
-    try:
-        return float(value)
-    except Exception as e: # noqa F841
-        pass
-    return str(value)
@ -1,184 +1,23 @@
|
||||
# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team
|
||||
|
||||
"""Invokeai configuration system.
|
||||
|
||||
Arguments and fields are taken from the pydantic definition of the
|
||||
model. Defaults can be set by creating a yaml configuration file that
|
||||
has a top-level key of "InvokeAI" and subheadings for each of the
|
||||
categories returned by `invokeai --help`. The file looks like this:
|
||||
|
||||
[file: invokeai.yaml]
|
||||
|
||||
InvokeAI:
|
||||
Web Server:
|
||||
host: 127.0.0.1
|
||||
port: 9090
|
||||
allow_origins: []
|
||||
allow_credentials: true
|
||||
allow_methods:
|
||||
- '*'
|
||||
allow_headers:
|
||||
- '*'
|
||||
Features:
|
||||
esrgan: true
|
||||
internet_available: true
|
||||
log_tokenization: false
|
||||
patchmatch: true
|
||||
ignore_missing_core_models: false
|
||||
Paths:
|
||||
autoimport_dir: autoimport
|
||||
lora_dir: null
|
||||
embedding_dir: null
|
||||
controlnet_dir: null
|
||||
models_dir: models
|
||||
legacy_conf_dir: configs/stable-diffusion
|
||||
db_dir: databases
|
||||
outdir: /home/lstein/invokeai-main/outputs
|
||||
use_memory_db: false
|
||||
Logging:
|
||||
log_handlers:
|
||||
- console
|
||||
log_format: plain
|
||||
log_level: info
|
||||
Model Cache:
|
||||
ram: 13.5
|
||||
vram: 0.25
|
||||
lazy_offload: true
|
||||
log_memory_usage: false
|
||||
Device:
|
||||
device: auto
|
||||
precision: auto
|
||||
Generation:
|
||||
sequential_guidance: false
|
||||
attention_type: xformers
|
||||
attention_slice_size: auto
|
||||
force_tiled_decode: false
|
||||
|
||||
The default name of the configuration file is `invokeai.yaml`, located
|
||||
in INVOKEAI_ROOT. You can replace supersede this by providing any
|
||||
OmegaConf dictionary object initialization time:
|
||||
|
||||
omegaconf = OmegaConf.load('/tmp/init.yaml')
|
||||
conf = InvokeAIAppConfig()
|
||||
conf.parse_args(conf=omegaconf)
|
||||
|
||||
InvokeAIAppConfig.parse_args() will parse the contents of `sys.argv`
|
||||
at initialization time. You may pass a list of strings in the optional
|
||||
`argv` argument to use instead of the system argv:
|
||||
|
||||
conf.parse_args(argv=['--log_tokenization'])
|
||||
|
||||
It is also possible to set a value at initialization time. However, if
|
||||
you call parse_args() it may be overwritten.
|
||||
|
||||
conf = InvokeAIAppConfig(log_tokenization=True)
|
||||
conf.parse_args(argv=['--no-log_tokenization'])
|
||||
conf.log_tokenization
|
||||
# False
|
||||
|
||||
To avoid this, use `get_config()` to retrieve the application-wide
|
||||
configuration object. This will retain any properties set at object
|
||||
creation time:
|
||||
|
||||
conf = InvokeAIAppConfig.get_config(log_tokenization=True)
|
||||
conf.parse_args(argv=['--no-log_tokenization'])
|
||||
conf.log_tokenization
|
||||
# True
|
||||
|
||||
Any setting can be overwritten by setting an environment variable of
|
||||
form: "INVOKEAI_<setting>", as in:
|
||||
|
||||
export INVOKEAI_port=8080
|
||||
|
||||
Order of precedence (from highest):
|
||||
1) initialization options
|
||||
2) command line options
|
||||
3) environment variable options
|
||||
4) config file options
|
||||
5) pydantic defaults
|
||||
|
||||
Typical usage at the top level file:
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
|
||||
# get global configuration and print its cache size
|
||||
conf = InvokeAIAppConfig.get_config()
|
||||
conf.parse_args()
|
||||
print(conf.ram_cache_size)
|
||||
|
||||
Typical usage in a backend module:
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
|
||||
# get global configuration and print its cache size value
|
||||
conf = InvokeAIAppConfig.get_config()
|
||||
print(conf.ram_cache_size)
|
||||
|
||||
Computed properties:
|
||||
|
||||
The InvokeAIAppConfig object has a series of properties that
|
||||
resolve paths relative to the runtime root directory. They each return
|
||||
a Path object:
|
||||
|
||||
root_path - path to InvokeAI root
|
||||
output_path - path to default outputs directory
|
||||
conf - alias for the above
|
||||
embedding_path - path to the embeddings directory
|
||||
lora_path - path to the LoRA directory
|
||||
|
||||
In most cases, you will want to create a single InvokeAIAppConfig
|
||||
object for the entire application. The InvokeAIAppConfig.get_config() function
|
||||
does this:
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
config.parse_args() # read values from the command line/config file
|
||||
print(config.root)
|
||||
|
||||
# Subclassing
|
||||
|
||||
If you wish to create a similar class, please subclass the
|
||||
`InvokeAISettings` class and define a Literal field named "type",
|
||||
which is set to the desired top-level name. For example, to create a
|
||||
"InvokeBatch" configuration, define like this:
|
||||
|
||||
class InvokeBatch(InvokeAISettings):
|
||||
type: Literal["InvokeBatch"] = "InvokeBatch"
|
||||
node_count : int = Field(default=1, description="Number of nodes to run on", json_schema_extra=dict(category='Resources'))
|
||||
cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", json_schema_extra=dict(category='Resources'))
|
||||
|
||||
This will now read and write from the "InvokeBatch" section of the
|
||||
config file, look for environment variables named INVOKEBATCH_*, and
|
||||
accept the command-line arguments `--node_count` and `--cpu_count`. The
|
||||
two configs are kept in separate sections of the config file:
|
||||
|
||||
# invokeai.yaml
|
||||
|
||||
InvokeBatch:
|
||||
Resources:
|
||||
node_count: 1
|
||||
cpu_count: 8
|
||||
|
||||
InvokeAI:
|
||||
Paths:
|
||||
root: /home/lstein/invokeai-main
|
||||
legacy_conf_dir: configs/stable-diffusion
|
||||
outdir: outputs
|
||||
...
|
||||
|
||||
"""
|
||||
# TODO(psyche): pydantic-settings supports YAML settings sources. If we can figure out a way to integrate the YAML
|
||||
# migration logic, we could use that for simpler config loading.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Any, ClassVar, Dict, List, Literal, Optional
|
||||
from typing import Any, Literal, Optional
|
||||
|
||||
from omegaconf import DictConfig, OmegaConf
|
||||
from pydantic import Field
|
||||
from pydantic.config import JsonDict
|
||||
from pydantic_settings import SettingsConfigDict
|
||||
import psutil
|
||||
import yaml
|
||||
from pydantic import BaseModel, Field, PrivateAttr, field_validator
|
||||
from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, SettingsConfigDict
|
||||
|
||||
from .config_base import InvokeAISettings
|
||||
import invokeai.configs as model_configs
|
||||
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
|
||||
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
|
||||
|
||||
INIT_FILE = Path("invokeai.yaml")
|
||||
DB_FILE = Path("invokeai.db")
|
||||
@ -186,309 +25,463 @@ LEGACY_INIT_FILE = Path("invokeai.init")
|
||||
DEFAULT_RAM_CACHE = 10.0
|
||||
DEFAULT_VRAM_CACHE = 0.25
|
||||
DEFAULT_CONVERT_CACHE = 20.0
|
||||
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
|
||||
PRECISION = Literal["auto", "float16", "bfloat16", "float32", "autocast"]
|
||||
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
|
||||
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
|
||||
LOG_FORMAT = Literal["plain", "color", "syslog", "legacy"]
|
||||
LOG_LEVEL = Literal["debug", "info", "warning", "error", "critical"]
|
||||
CONFIG_SCHEMA_VERSION = "4.0.0"
|
||||
|
||||
|
||||
class Categories(object):
|
||||
"""Category headers for configuration variable groups."""
|
||||
def get_default_ram_cache_size() -> float:
|
||||
"""Run a heuristic for the default RAM cache based on installed RAM."""
|
||||
|
||||
WebServer: JsonDict = {"category": "Web Server"}
|
||||
Features: JsonDict = {"category": "Features"}
|
||||
Paths: JsonDict = {"category": "Paths"}
|
||||
Logging: JsonDict = {"category": "Logging"}
|
||||
Development: JsonDict = {"category": "Development"}
|
||||
Other: JsonDict = {"category": "Other"}
|
||||
ModelCache: JsonDict = {"category": "Model Cache"}
|
||||
Device: JsonDict = {"category": "Device"}
|
||||
Generation: JsonDict = {"category": "Generation"}
|
||||
Queue: JsonDict = {"category": "Queue"}
|
||||
Nodes: JsonDict = {"category": "Nodes"}
|
||||
MemoryPerformance: JsonDict = {"category": "Memory/Performance"}
|
||||
# On some machines, psutil.virtual_memory().total gives a value that is slightly less than the actual RAM, so the
|
||||
# limits are set slightly lower than than what we expect the actual RAM to be.
|
||||
|
||||
GB = 1024**3
|
||||
max_ram = psutil.virtual_memory().total / GB
|
||||
|
||||
if max_ram >= 60:
|
||||
return 15.0
|
||||
if max_ram >= 30:
|
||||
return 7.5
|
||||
if max_ram >= 14:
|
||||
return 4.0
|
||||
return 2.1 # 2.1 is just large enough for sd 1.5 ;-)
|
||||
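`get_default_ram_cache_size` chooses the model-cache default from the machine's installed RAM via `psutil`. A quick illustration of the same thresholds pulled out as a standalone function (the numbers are copied from the hunk above; the wrapper function name is just for this example):

import psutil

GB = 1024**3


def default_ram_cache_gb(total_ram_gb: float) -> float:
    # Same thresholds as the new heuristic: bigger machines get a bigger default cache.
    if total_ram_gb >= 60:
        return 15.0
    if total_ram_gb >= 30:
        return 7.5
    if total_ram_gb >= 14:
        return 4.0
    return 2.1  # just enough for SD 1.5


print(default_ram_cache_gb(psutil.virtual_memory().total / GB))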
|
||||
|
||||
class InvokeAIAppConfig(InvokeAISettings):
|
||||
"""Configuration object for InvokeAI App."""
|
||||
class URLRegexTokenPair(BaseModel):
|
||||
url_regex: str = Field(description="Regular expression to match against the URL")
|
||||
token: str = Field(description="Token to use when the URL matches the regex")
|
||||
|
||||
singleton_config: ClassVar[Optional[InvokeAIAppConfig]] = None
|
||||
singleton_init: ClassVar[Optional[Dict[str, Any]]] = None
|
||||
@field_validator("url_regex")
|
||||
@classmethod
|
||||
def validate_url_regex(cls, v: str) -> str:
|
||||
"""Validate that the value is a valid regex."""
|
||||
try:
|
||||
re.compile(v)
|
||||
except re.error as e:
|
||||
raise ValueError(f"Invalid regex: {e}")
|
||||
return v
|
||||
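`URLRegexTokenPair` pairs a URL regex with a bearer token, and `remote_api_tokens` holds a list of them. The matching logic itself is not shown in this diff, so the following is a hypothetical illustration of how a download URL could be checked against such a list; the helper name and use of `re.search` are assumptions.

import re
from typing import Optional

from pydantic import BaseModel, Field


class URLRegexTokenPair(BaseModel):
    url_regex: str = Field(description="Regular expression to match against the URL")
    token: str = Field(description="Token to use when the URL matches the regex")


def token_for_url(url: str, pairs: list[URLRegexTokenPair]) -> Optional[str]:
    # Hypothetical helper: return the first token whose regex matches the download URL.
    for pair in pairs:
        if re.search(pair.url_regex, url):
            return pair.token
    return None


pairs = [URLRegexTokenPair(url_regex=r"civitai\.com", token="example-token")]
print(token_for_url("https://civitai.com/api/download/models/12345", pairs))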
|
||||
|
||||
class InvokeAIAppConfig(BaseSettings):
|
||||
"""Invoke's global app configuration.
|
||||
|
||||
Typically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object.
|
||||
|
||||
Attributes:
|
||||
host: IP address to bind to. Use `0.0.0.0` to serve to your local network.
|
||||
port: Port to bind to.
|
||||
allow_origins: Allowed CORS origins.
|
||||
allow_credentials: Allow CORS credentials.
|
||||
allow_methods: Methods allowed for CORS.
|
||||
allow_headers: Headers allowed for CORS.
|
||||
ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.
|
||||
ssl_keyfile: SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.
|
||||
log_tokenization: Enable logging of parsed prompt tokens.
|
||||
patchmatch: Enable patchmatch inpaint code.
|
||||
autoimport_dir: Path to a directory of models files to be imported on startup.
|
||||
models_dir: Path to the models directory.
|
||||
convert_cache_dir: Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location.
|
||||
legacy_conf_dir: Path to directory of legacy checkpoint config files.
|
||||
db_dir: Path to InvokeAI databases directory.
|
||||
outputs_dir: Path to directory for outputs.
|
||||
custom_nodes_dir: Path to directory for custom nodes.
|
||||
log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
|
||||
log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
|
||||
log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
|
||||
log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
|
||||
use_memory_db: Use in-memory database. Useful for development.
|
||||
dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.
|
||||
profile_graphs: Enable graph profiling using `cProfile`.
|
||||
profile_prefix: An optional prefix for profile output files.
|
||||
profiles_dir: Path to profiles output directory.
|
||||
ram: Maximum memory amount used by memory model cache for rapid switching (GB).
|
||||
vram: Amount of VRAM reserved for model storage (GB).
|
||||
convert_cache: Maximum size of on-disk converted models cache (GB).
|
||||
lazy_offload: Keep models in VRAM until their space is needed.
|
||||
log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
|
||||
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
|
||||
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`, `autocast`
|
||||
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
|
||||
attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
|
||||
attention_slice_size: Slice size, valid when attention_type=="sliced".<br>Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`
|
||||
force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).
|
||||
pil_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
|
||||
max_queue_size: Maximum number of items in the session queue.
|
||||
allow_nodes: List of nodes to allow. Omit to allow all.
|
||||
deny_nodes: List of nodes to deny. Omit to deny none.
|
||||
node_cache_size: How many cached nodes to keep in memory.
|
||||
hashing_algorithm: Model hashing algorthim for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
|
||||
remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
|
||||
"""
|
||||
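The new docstring steers callers to `get_config` instead of constructing the settings object themselves. A minimal usage sketch, assuming only the exports shown in the `invokeai.app.services.config` `__init__` hunk earlier and the field defaults listed below:

from invokeai.app.services.config import get_config

config = get_config()  # application-wide singleton config
print(config.models_dir)         # Path("models") unless overridden
print(config.hashing_algorithm)  # "blake3_single" by default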
|
||||
_root: Optional[Path] = PrivateAttr(default=None)
|
||||
_config_file: Optional[Path] = PrivateAttr(default=None)
|
||||
|
||||
# fmt: off
|
||||
type: Literal["InvokeAI"] = "InvokeAI"
|
||||
|
||||
# INTERNAL
|
||||
schema_version: str = Field(default=CONFIG_SCHEMA_VERSION, description="Schema version of the config file. This is not a user-configurable setting.")
|
||||
# This is only used during v3 models.yaml migration
|
||||
legacy_models_yaml_path: Optional[Path] = Field(default=None, description="Path to the legacy models.yaml file. This is not a user-configurable setting.")
|
||||
|
||||
# WEB
|
||||
host : str = Field(default="127.0.0.1", description="IP address to bind to", json_schema_extra=Categories.WebServer)
|
||||
port : int = Field(default=9090, description="Port to bind to", json_schema_extra=Categories.WebServer)
|
||||
allow_origins : List[str] = Field(default=[], description="Allowed CORS origins", json_schema_extra=Categories.WebServer)
|
||||
allow_credentials : bool = Field(default=True, description="Allow CORS credentials", json_schema_extra=Categories.WebServer)
|
||||
allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS", json_schema_extra=Categories.WebServer)
|
||||
allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS", json_schema_extra=Categories.WebServer)
|
||||
# SSL options correspond to https://www.uvicorn.org/settings/#https
|
||||
ssl_certfile : Optional[Path] = Field(default=None, description="SSL certificate file (for HTTPS)", json_schema_extra=Categories.WebServer)
|
||||
ssl_keyfile : Optional[Path] = Field(default=None, description="SSL key file", json_schema_extra=Categories.WebServer)
|
||||
host: str = Field(default="127.0.0.1", description="IP address to bind to. Use `0.0.0.0` to serve to your local network.")
|
||||
port: int = Field(default=9090, description="Port to bind to.")
|
||||
allow_origins: list[str] = Field(default=[], description="Allowed CORS origins.")
|
||||
allow_credentials: bool = Field(default=True, description="Allow CORS credentials.")
|
||||
allow_methods: list[str] = Field(default=["*"], description="Methods allowed for CORS.")
|
||||
allow_headers: list[str] = Field(default=["*"], description="Headers allowed for CORS.")
|
||||
ssl_certfile: Optional[Path] = Field(default=None, description="SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.")
|
||||
ssl_keyfile: Optional[Path] = Field(default=None, description="SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.")
|
||||
|
||||
# FEATURES
|
||||
esrgan : bool = Field(default=True, description="Enable/disable upscaling code", json_schema_extra=Categories.Features)
|
||||
internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", json_schema_extra=Categories.Features)
|
||||
log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", json_schema_extra=Categories.Features)
|
||||
patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", json_schema_extra=Categories.Features)
|
||||
ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', json_schema_extra=Categories.Features)
|
||||
# MISC FEATURES
|
||||
log_tokenization: bool = Field(default=False, description="Enable logging of parsed prompt tokens.")
|
||||
patchmatch: bool = Field(default=True, description="Enable patchmatch inpaint code.")
|
||||
|
||||
# PATHS
|
||||
root : Optional[Path] = Field(default=None, description='InvokeAI runtime root directory', json_schema_extra=Categories.Paths)
|
||||
autoimport_dir : Path = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths)
|
||||
models_dir : Path = Field(default=Path('models'), description='Path to the models directory', json_schema_extra=Categories.Paths)
|
||||
convert_cache_dir : Path = Field(default=Path('models/.cache'), description='Path to the converted models cache directory', json_schema_extra=Categories.Paths)
|
||||
legacy_conf_dir : Path = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files', json_schema_extra=Categories.Paths)
|
||||
db_dir : Path = Field(default=Path('databases'), description='Path to InvokeAI databases directory', json_schema_extra=Categories.Paths)
|
||||
outdir : Path = Field(default=Path('outputs'), description='Default folder for output images', json_schema_extra=Categories.Paths)
|
||||
use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', json_schema_extra=Categories.Paths)
|
||||
custom_nodes_dir : Path = Field(default=Path('nodes'), description='Path to directory for custom nodes', json_schema_extra=Categories.Paths)
|
||||
from_file : Optional[Path] = Field(default=None, description='Take command input from the indicated file (command-line client only)', json_schema_extra=Categories.Paths)
|
||||
autoimport_dir: Path = Field(default=Path("autoimport"), description="Path to a directory of models files to be imported on startup.")
|
||||
models_dir: Path = Field(default=Path("models"), description="Path to the models directory.")
|
||||
convert_cache_dir: Path = Field(default=Path("models/.cache"), description="Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location.")
|
||||
legacy_conf_dir: Path = Field(default=Path("configs"), description="Path to directory of legacy checkpoint config files.")
|
||||
db_dir: Path = Field(default=Path("databases"), description="Path to InvokeAI databases directory.")
|
||||
outputs_dir: Path = Field(default=Path("outputs"), description="Path to directory for outputs.")
|
||||
custom_nodes_dir: Path = Field(default=Path("nodes"), description="Path to directory for custom nodes.")
|
||||
|
||||
# LOGGING
|
||||
log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>"', json_schema_extra=Categories.Logging)
|
||||
log_handlers: list[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".')
|
||||
# note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
|
||||
log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style', json_schema_extra=Categories.Logging)
|
||||
log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher", json_schema_extra=Categories.Logging)
|
||||
log_sql : bool = Field(default=False, description="Log SQL queries", json_schema_extra=Categories.Logging)
|
||||
log_format: LOG_FORMAT = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.')
|
||||
log_level: LOG_LEVEL = Field(default="info", description="Emit logging messages at this level or higher.")
|
||||
log_sql: bool = Field(default=False, description="Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.")
|
||||
|
||||
# Development
|
||||
dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed.", json_schema_extra=Categories.Development)
|
||||
profile_graphs : bool = Field(default=False, description="Enable graph profiling", json_schema_extra=Categories.Development)
|
||||
profile_prefix : Optional[str] = Field(default=None, description="An optional prefix for profile output files.", json_schema_extra=Categories.Development)
|
||||
profiles_dir : Path = Field(default=Path('profiles'), description="Directory for graph profiles", json_schema_extra=Categories.Development)
|
||||
skip_model_hash : bool = Field(default=False, description="Skip model hashing, instead assigning a UUID to models. Useful when using a memory db to reduce startup time.", json_schema_extra=Categories.Development)
|
||||
|
||||
version : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)
|
||||
use_memory_db: bool = Field(default=False, description="Use in-memory database. Useful for development.")
|
||||
dev_reload: bool = Field(default=False, description="Automatically reload when Python sources are changed. Does not reload node definitions.")
|
||||
profile_graphs: bool = Field(default=False, description="Enable graph profiling using `cProfile`.")
|
||||
profile_prefix: Optional[str] = Field(default=None, description="An optional prefix for profile output files.")
|
||||
profiles_dir: Path = Field(default=Path("profiles"), description="Path to profiles output directory.")
|
||||
|
||||
# CACHE
|
||||
ram : float = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
|
||||
vram : float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
|
||||
convert_cache : float = Field(default=DEFAULT_CONVERT_CACHE, ge=0, description="Maximum size of on-disk converted models cache (GB)", json_schema_extra=Categories.ModelCache)
|
||||
|
||||
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, )
|
||||
log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache)
|
||||
ram: float = Field(default_factory=get_default_ram_cache_size, gt=0, description="Maximum memory amount used by memory model cache for rapid switching (GB).")
|
||||
vram: float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (GB).")
|
||||
convert_cache: float = Field(default=DEFAULT_CONVERT_CACHE, ge=0, description="Maximum size of on-disk converted models cache (GB).")
|
||||
lazy_offload: bool = Field(default=True, description="Keep models in VRAM until their space is needed.")
|
||||
log_memory_usage: bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.")
|
||||
|
||||
# DEVICE
|
||||
device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
|
||||
precision : Literal["auto", "float16", "bfloat16", "float32", "autocast"] = Field(default="auto", description="Floating point precision", json_schema_extra=Categories.Device)
|
||||
device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
|
||||
precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
|
||||
|
||||
# GENERATION
|
||||
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", json_schema_extra=Categories.Generation)
|
||||
attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type", json_schema_extra=Categories.Generation)
|
||||
attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced"', json_schema_extra=Categories.Generation)
|
||||
force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Generation)
|
||||
png_compress_level : int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = fastest, largest filesize, 9 = slowest, smallest filesize", json_schema_extra=Categories.Generation)
|
||||
|
||||
# QUEUE
|
||||
max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue", json_schema_extra=Categories.Queue)
|
||||
sequential_guidance: bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.")
|
||||
attention_type: ATTENTION_TYPE = Field(default="auto", description="Attention type.")
|
||||
attention_slice_size: ATTENTION_SLICE_SIZE = Field(default="auto", description='Slice size, valid when attention_type=="sliced".')
|
||||
force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).")
|
||||
pil_compress_level: int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.")
|
||||
max_queue_size: int = Field(default=10000, gt=0, description="Maximum number of items in the session queue.")
|
||||
|
||||
# NODES
|
||||
allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", json_schema_extra=Categories.Nodes)
|
||||
deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes)
|
||||
node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory", json_schema_extra=Categories.Nodes)
|
||||
allow_nodes: Optional[list[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.")
|
||||
deny_nodes: Optional[list[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.")
|
||||
node_cache_size: int = Field(default=512, description="How many cached nodes to keep in memory.")
|
||||
|
||||
# MODEL IMPORT
|
||||
civitai_api_key : Optional[str] = Field(default=os.environ.get("CIVITAI_API_KEY"), description="API key for CivitAI", json_schema_extra=Categories.Other)
|
||||
# MODEL INSTALL
|
||||
hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
|
||||
remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")
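For illustration, a hedged sketch of how the two install-related settings above might be set programmatically; the field names come from this class, `get_config`/`update_config` are defined later in this file, and the values are invented:

    config = get_config()
    config.update_config(
        {
            "hashing_algorithm": "blake3_multi",  # faster on SSDs, per the description above
            "remote_api_tokens": [{"url_regex": "civitai.com", "token": "<api token>"}],
        }
    )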
|
||||
|
||||
# DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAIN VALUES FROM PRE-3.1 CONFIG FILES
|
||||
always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.MemoryPerformance)
|
||||
max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.MemoryPerformance)
|
||||
max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", json_schema_extra=Categories.MemoryPerformance)
|
||||
xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", json_schema_extra=Categories.MemoryPerformance)
|
||||
tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.MemoryPerformance)
|
||||
lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Paths)
|
||||
embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
|
||||
controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Paths)
|
||||
conf_path : Path = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Paths)
|
||||
|
||||
# this is not referred to in the source code and can be removed entirely
|
||||
#free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
|
||||
|
||||
# See InvokeAIAppConfig subclass below for CACHE and DEVICE categories
|
||||
# fmt: on
|
||||
|
||||
model_config = SettingsConfigDict(validate_assignment=True, env_prefix="INVOKEAI")
|
||||
model_config = SettingsConfigDict(env_prefix="INVOKEAI_", env_ignore_empty=True)
|
||||
|
||||
def parse_args(
|
||||
self,
|
||||
argv: Optional[list[str]] = None,
|
||||
conf: Optional[DictConfig] = None,
|
||||
clobber: Optional[bool] = False,
|
||||
) -> None:
|
||||
def update_config(self, config: dict[str, Any] | InvokeAIAppConfig, clobber: bool = True) -> None:
|
||||
"""Updates the config, overwriting existing values.
|
||||
|
||||
Args:
|
||||
config: A dictionary of config settings, or instance of `InvokeAIAppConfig`. If an instance of \
|
||||
`InvokeAIAppConfig`, only the explicitly set fields will be merged into the singleton config.
|
||||
clobber: If `True`, overwrite existing values. If `False`, only update fields that are not already set.
|
||||
"""
|
||||
Update settings with contents of init file, environment, and command-line settings.
|
||||
|
||||
:param conf: alternate Omegaconf dictionary object
|
||||
:param argv: alternate sys.argv list
|
||||
:param clobber: overwrite any initialization parameters passed during initialization
|
||||
if isinstance(config, dict):
|
||||
new_config = self.model_validate(config)
|
||||
else:
|
||||
new_config = config
|
||||
|
||||
for field_name in new_config.model_fields_set:
|
||||
new_value = getattr(new_config, field_name)
|
||||
current_value = getattr(self, field_name)
|
||||
|
||||
if field_name in self.model_fields_set and not clobber:
|
||||
continue
|
||||
|
||||
if new_value != current_value:
|
||||
setattr(self, field_name, new_value)
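A minimal usage sketch of the merge behaviour documented above (values are illustrative; `get_config` is the cached accessor defined later in this file):

    config = get_config()
    # With clobber=True (the default), explicitly provided fields overwrite current values.
    config.update_config({"ram": 12.0, "precision": "float16"}, clobber=True)
    # With clobber=False, fields already set on the singleton are left untouched.
    config.update_config({"ram": 4.0}, clobber=False)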
|
||||
|
||||
def write_file(self, dest_path: Path, as_example: bool = False) -> None:
|
||||
"""Write the current configuration to file. This will overwrite the existing file.
|
||||
|
||||
A `meta` stanza is added to the top of the file, containing metadata about the config file. This is not stored in the config object.
|
||||
|
||||
Args:
|
||||
dest_path: Path to write the config to.
|
||||
"""
|
||||
# Set the runtime root directory. We parse command-line switches here
|
||||
# in order to pick up the --root_dir option.
|
||||
super().parse_args(argv)
|
||||
loaded_conf = None
|
||||
if conf is None:
|
||||
try:
|
||||
loaded_conf = OmegaConf.load(self.root_dir / INIT_FILE)
|
||||
except Exception:
|
||||
pass
|
||||
if isinstance(loaded_conf, DictConfig):
|
||||
InvokeAISettings.initconf = loaded_conf
|
||||
else:
|
||||
InvokeAISettings.initconf = conf
|
||||
dest_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(dest_path, "w") as file:
|
||||
# Meta fields should be written in a separate stanza - skip legacy_models_yaml_path
|
||||
meta_dict = self.model_dump(mode="json", include={"schema_version"})
|
||||
|
||||
# parse args again in order to pick up settings in configuration file
|
||||
super().parse_args(argv)
|
||||
# User settings
|
||||
config_dict = self.model_dump(
|
||||
mode="json",
|
||||
exclude_unset=False if as_example else True,
|
||||
exclude_defaults=False if as_example else True,
|
||||
exclude_none=True if as_example else False,
|
||||
exclude={"schema_version", "legacy_models_yaml_path"},
|
||||
)
|
||||
|
||||
if self.singleton_init and not clobber:
|
||||
# When setting values in this way, set validate_assignment to true if you want to validate the value.
|
||||
for k, v in self.singleton_init.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
@classmethod
|
||||
def get_config(cls, **kwargs: Any) -> InvokeAIAppConfig:
|
||||
"""Return a singleton InvokeAIAppConfig configuration object."""
|
||||
if (
|
||||
cls.singleton_config is None
|
||||
or type(cls.singleton_config) is not cls
|
||||
or (kwargs and cls.singleton_init != kwargs)
|
||||
):
|
||||
cls.singleton_config = cls(**kwargs)
|
||||
cls.singleton_init = kwargs
|
||||
return cls.singleton_config
|
||||
|
||||
@property
|
||||
def root_path(self) -> Path:
|
||||
"""Path to the runtime root directory."""
|
||||
if self.root:
|
||||
root = Path(self.root).expanduser().absolute()
|
||||
else:
|
||||
root = self.find_root().expanduser().absolute()
|
||||
self.root = root # insulate ourselves from relative paths that may change
|
||||
return root.resolve()
|
||||
|
||||
@property
|
||||
def root_dir(self) -> Path:
|
||||
"""Alias for above."""
|
||||
return self.root_path
|
||||
if as_example:
|
||||
file.write(
|
||||
"# This is an example file with default and example settings. Use the values here as a baseline.\n\n"
|
||||
)
|
||||
file.write("# Internal metadata - do not edit:\n")
|
||||
file.write(yaml.dump(meta_dict, sort_keys=False))
|
||||
file.write("\n")
|
||||
file.write("# Put user settings here - see https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/:\n")
|
||||
if len(config_dict) > 0:
|
||||
file.write(yaml.dump(config_dict, sort_keys=False))
|
||||
|
||||
def _resolve(self, partial_path: Path) -> Path:
|
||||
return (self.root_path / partial_path).resolve()
|
||||
|
||||
@property
|
||||
def init_file_path(self) -> Path:
|
||||
"""Path to invokeai.yaml."""
|
||||
resolved_path = self._resolve(INIT_FILE)
|
||||
def root_path(self) -> Path:
|
||||
"""Path to the runtime root directory, resolved to an absolute path."""
|
||||
if self._root:
|
||||
root = Path(self._root).expanduser().absolute()
|
||||
else:
|
||||
root = self.find_root().expanduser().absolute()
|
||||
self._root = root # insulate ourselves from relative paths that may change
|
||||
return root.resolve()
|
||||
|
||||
@property
|
||||
def config_file_path(self) -> Path:
|
||||
"""Path to invokeai.yaml, resolved to an absolute path.."""
|
||||
resolved_path = self._resolve(self._config_file or INIT_FILE)
|
||||
assert resolved_path is not None
|
||||
return resolved_path
|
||||
|
||||
@property
|
||||
def output_path(self) -> Optional[Path]:
|
||||
"""Path to defaults outputs directory."""
|
||||
return self._resolve(self.outdir)
|
||||
def autoimport_path(self) -> Path:
|
||||
"""Path to the autoimports directory, resolved to an absolute path.."""
|
||||
return self._resolve(self.autoimport_dir)
|
||||
|
||||
@property
|
||||
def outputs_path(self) -> Optional[Path]:
|
||||
"""Path to the outputs directory, resolved to an absolute path.."""
|
||||
return self._resolve(self.outputs_dir)
|
||||
|
||||
@property
|
||||
def db_path(self) -> Path:
|
||||
"""Path to the invokeai.db file."""
|
||||
"""Path to the invokeai.db file, resolved to an absolute path.."""
|
||||
db_dir = self._resolve(self.db_dir)
|
||||
assert db_dir is not None
|
||||
return db_dir / DB_FILE
|
||||
|
||||
@property
|
||||
def model_conf_path(self) -> Path:
|
||||
"""Path to models configuration file."""
|
||||
return self._resolve(self.conf_path)
|
||||
|
||||
@property
|
||||
def legacy_conf_path(self) -> Path:
|
||||
"""Path to directory of legacy configuration files (e.g. v1-inference.yaml)."""
|
||||
"""Path to directory of legacy configuration files (e.g. v1-inference.yaml), resolved to an absolute path.."""
|
||||
return self._resolve(self.legacy_conf_dir)
|
||||
|
||||
@property
|
||||
def models_path(self) -> Path:
|
||||
"""Path to the models directory."""
|
||||
"""Path to the models directory, resolved to an absolute path.."""
|
||||
return self._resolve(self.models_dir)
|
||||
|
||||
@property
|
||||
def models_convert_cache_path(self) -> Path:
|
||||
"""Path to the converted cache models directory."""
|
||||
def convert_cache_path(self) -> Path:
|
||||
"""Path to the converted cache models directory, resolved to an absolute path.."""
|
||||
return self._resolve(self.convert_cache_dir)
|
||||
|
||||
@property
|
||||
def custom_nodes_path(self) -> Path:
|
||||
"""Path to the custom nodes directory."""
|
||||
"""Path to the custom nodes directory, resolved to an absolute path.."""
|
||||
custom_nodes_path = self._resolve(self.custom_nodes_dir)
|
||||
assert custom_nodes_path is not None
|
||||
return custom_nodes_path
|
||||
|
||||
# the following methods support legacy calls leftover from the Globals era
|
||||
@property
|
||||
def full_precision(self) -> bool:
|
||||
"""Return true if precision set to float32."""
|
||||
return self.precision == "float32"
|
||||
|
||||
@property
|
||||
def try_patchmatch(self) -> bool:
|
||||
"""Return true if patchmatch true."""
|
||||
return self.patchmatch
|
||||
|
||||
@property
|
||||
def nsfw_checker(self) -> bool:
|
||||
"""Return value for NSFW checker. The NSFW node is always active and disabled from Web UI."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def invisible_watermark(self) -> bool:
|
||||
"""Return value of invisible watermark. It is always active and disabled from Web UI."""
|
||||
return True
|
||||
|
||||
@property
|
||||
def ram_cache_size(self) -> float:
|
||||
"""Return the ram cache size using the legacy or modern setting (GB)."""
|
||||
return self.max_cache_size or self.ram
|
||||
|
||||
@property
|
||||
def vram_cache_size(self) -> float:
|
||||
"""Return the vram cache size using the legacy or modern setting (GB)."""
|
||||
return self.max_vram_cache_size or self.vram
|
||||
|
||||
@property
|
||||
def convert_cache_size(self) -> float:
|
||||
"""Return the convert cache size on disk (GB)."""
|
||||
return self.convert_cache
|
||||
|
||||
@property
|
||||
def use_cpu(self) -> bool:
|
||||
"""Return true if the device is set to CPU or the always_use_cpu flag is set."""
|
||||
return self.always_use_cpu or self.device == "cpu"
|
||||
|
||||
@property
|
||||
def disable_xformers(self) -> bool:
|
||||
"""Return true if enable_xformers is false (reversed logic) and attention type is not set to xformers."""
|
||||
disabled_in_config = not self.xformers_enabled
|
||||
return disabled_in_config and self.attention_type != "xformers"
|
||||
|
||||
@property
|
||||
def profiles_path(self) -> Path:
|
||||
"""Path to the graph profiles directory."""
|
||||
"""Path to the graph profiles directory, resolved to an absolute path.."""
|
||||
return self._resolve(self.profiles_dir)
|
||||
|
||||
@staticmethod
|
||||
def find_root() -> Path:
|
||||
"""Choose the runtime root directory when not specified on command line or init file."""
|
||||
return _find_root()
|
||||
venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
|
||||
if os.environ.get("INVOKEAI_ROOT"):
|
||||
root = Path(os.environ["INVOKEAI_ROOT"])
|
||||
elif any((venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]):
|
||||
root = (venv.parent).resolve()
|
||||
else:
|
||||
root = Path("~/invokeai").expanduser().resolve()
|
||||
return root
|
||||
|
||||
|
||||
def get_invokeai_config(**kwargs: Any) -> InvokeAIAppConfig:
|
||||
"""Legacy function which returns InvokeAIAppConfig.get_config()."""
|
||||
return InvokeAIAppConfig.get_config(**kwargs)
|
||||
class DefaultInvokeAIAppConfig(InvokeAIAppConfig):
|
||||
"""A version of `InvokeAIAppConfig` that does not automatically parse any settings from environment variables
|
||||
or any file.
|
||||
|
||||
This is useful for writing out a default config file.
|
||||
|
||||
Note that init settings are set if provided.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def settings_customise_sources(
|
||||
cls,
|
||||
settings_cls: type[BaseSettings],
|
||||
init_settings: PydanticBaseSettingsSource,
|
||||
env_settings: PydanticBaseSettingsSource,
|
||||
dotenv_settings: PydanticBaseSettingsSource,
|
||||
file_secret_settings: PydanticBaseSettingsSource,
|
||||
) -> tuple[PydanticBaseSettingsSource, ...]:
|
||||
return (init_settings,)
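A hedged illustration of the behaviour described in the class docstring; the environment variable name is derived from the `INVOKEAI_` prefix configured earlier:

    import os

    os.environ["INVOKEAI_RAM"] = "99"
    default_cfg = DefaultInvokeAIAppConfig()  # ignores the env var; only init kwargs and field defaults apply
    regular_cfg = InvokeAIAppConfig()         # would pick up INVOKEAI_RAM via pydantic-settings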
|
||||
|
||||
|
||||
def _find_root() -> Path:
|
||||
venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
|
||||
if os.environ.get("INVOKEAI_ROOT"):
|
||||
root = Path(os.environ["INVOKEAI_ROOT"])
|
||||
elif any((venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]):
|
||||
root = (venv.parent).resolve()
|
||||
def migrate_v3_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
|
||||
"""Migrate a v3 config dictionary to a current config object.
|
||||
|
||||
Args:
|
||||
config_dict: A dictionary of settings from a v3 config file.
|
||||
|
||||
Returns:
|
||||
An instance of `InvokeAIAppConfig` with the migrated settings.
|
||||
|
||||
"""
|
||||
parsed_config_dict: dict[str, Any] = {}
|
||||
for _category_name, category_dict in config_dict["InvokeAI"].items():
|
||||
for k, v in category_dict.items():
|
||||
# `outdir` was renamed to `outputs_dir` in v4
|
||||
if k == "outdir":
|
||||
parsed_config_dict["outputs_dir"] = v
|
||||
# `max_cache_size` was renamed to `ram` some time in v3, but both names were used
|
||||
if k == "max_cache_size" and "ram" not in category_dict:
|
||||
parsed_config_dict["ram"] = v
|
||||
# `max_vram_cache_size` was renamed to `vram` some time in v3, but both names were used
|
||||
if k == "max_vram_cache_size" and "vram" not in category_dict:
|
||||
parsed_config_dict["vram"] = v
|
||||
if k == "conf_path":
|
||||
parsed_config_dict["legacy_models_yaml_path"] = v
|
||||
if k == "legacy_conf_dir":
|
||||
# The old default for this was "configs/stable-diffusion". If the incoming config has that as the value, we won't set it.
|
||||
# Else if the path ends in "stable-diffusion", we assume the parent is the new correct path.
|
||||
# Else we do not attempt to migrate this setting
|
||||
if v != "configs/stable-diffusion":
|
||||
parsed_config_dict["legacy_conf_dir"] = v
|
||||
elif Path(v).name == "stable-diffusion":
|
||||
parsed_config_dict["legacy_conf_dir"] = str(Path(v).parent)
|
||||
elif k in InvokeAIAppConfig.model_fields:
|
||||
# skip unknown fields
|
||||
parsed_config_dict[k] = v
|
||||
# When migrating the config file, we should not include currently-set environment variables.
|
||||
config = DefaultInvokeAIAppConfig.model_validate(parsed_config_dict)
|
||||
|
||||
return config
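A small, hedged example of the migration above; the v3 dictionary shape and values are illustrative and only exercise the renames handled in the loop:

    v3_dict = {
        "InvokeAI": {
            "Paths": {"outdir": "outputs", "conf_path": "configs/models.yaml"},
            "Model Cache": {"max_cache_size": 7.5},
        }
    }
    migrated = migrate_v3_config_dict(v3_dict)
    # migrated.outputs_dir -> Path("outputs"), migrated.ram -> 7.5, and the old models.yaml
    # location is preserved in legacy_models_yaml_path for the installer's _migrate_yaml step.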
|
||||
|
||||
|
||||
def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
|
||||
"""Load and migrate a config file to the latest version.
|
||||
|
||||
Args:
|
||||
config_path: Path to the config file.
|
||||
|
||||
Returns:
|
||||
An instance of `InvokeAIAppConfig` with the loaded and migrated settings.
|
||||
"""
|
||||
assert config_path.suffix == ".yaml"
|
||||
with open(config_path) as file:
|
||||
loaded_config_dict = yaml.safe_load(file)
|
||||
|
||||
assert isinstance(loaded_config_dict, dict)
|
||||
|
||||
if "InvokeAI" in loaded_config_dict:
|
||||
# This is a v3 config file, attempt to migrate it
|
||||
shutil.copy(config_path, config_path.with_suffix(".yaml.bak"))
|
||||
try:
|
||||
# loaded_config_dict could be the wrong shape, but we will catch all exceptions below
|
||||
migrated_config = migrate_v3_config_dict(loaded_config_dict) # pyright: ignore [reportUnknownArgumentType]
|
||||
except Exception as e:
|
||||
shutil.copy(config_path.with_suffix(".yaml.bak"), config_path)
|
||||
raise RuntimeError(f"Failed to load and migrate v3 config file {config_path}: {e}") from e
|
||||
migrated_config.write_file(config_path)
|
||||
return migrated_config
|
||||
else:
|
||||
root = Path("~/invokeai").expanduser().resolve()
|
||||
return root
|
||||
# Attempt to load as a v4 config file
|
||||
try:
|
||||
# Meta is not included in the model fields, so we need to validate it separately
|
||||
config = InvokeAIAppConfig.model_validate(loaded_config_dict)
|
||||
assert (
|
||||
config.schema_version == CONFIG_SCHEMA_VERSION
|
||||
), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
|
||||
return config
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
|
||||
def get_config() -> InvokeAIAppConfig:
|
||||
"""Get the global singleton app config.
|
||||
|
||||
When first called, this function:
|
||||
- Creates a config object. `pydantic-settings` handles merging of settings from environment variables, but not the init file.
|
||||
- Retrieves any provided CLI args from the InvokeAIArgs class. It does not _parse_ the CLI args; that is done in the main entrypoint.
|
||||
- Sets the root dir, if provided via CLI args.
|
||||
- Logs in to HF if there is no valid token already.
|
||||
- Copies all legacy configs to the legacy conf dir (needed for conversion from ckpt to diffusers).
|
||||
- Reads and merges in settings from the config file if it exists, else writes out a default config file.
|
||||
|
||||
On subsequent calls, the object is returned from the cache.
|
||||
"""
|
||||
# This object includes environment variables, as parsed by pydantic-settings
|
||||
config = InvokeAIAppConfig()
|
||||
|
||||
args = InvokeAIArgs.args
|
||||
|
||||
# This flag serves as a proxy for whether the config was retrieved in the context of the full application or not.
|
||||
# If it is False, we should just return a default config and not set the root, log in to HF, etc.
|
||||
if not InvokeAIArgs.did_parse:
|
||||
return config
|
||||
|
||||
# Set CLI args
|
||||
if root := getattr(args, "root", None):
|
||||
config._root = Path(root)
|
||||
if config_file := getattr(args, "config_file", None):
|
||||
config._config_file = Path(config_file)
|
||||
|
||||
# Create the example config file, with some extra example values provided
|
||||
example_config = DefaultInvokeAIAppConfig()
|
||||
example_config.remote_api_tokens = [
|
||||
URLRegexTokenPair(url_regex="cool-models.com", token="my_secret_token"),
|
||||
URLRegexTokenPair(url_regex="nifty-models.com", token="some_other_token"),
|
||||
]
|
||||
example_config.write_file(config.config_file_path.with_suffix(".example.yaml"), as_example=True)
|
||||
|
||||
# Copy all legacy configs - We know `__path__[0]` is correct here
|
||||
configs_src = Path(model_configs.__path__[0]) # pyright: ignore [reportUnknownMemberType, reportUnknownArgumentType, reportAttributeAccessIssue]
|
||||
shutil.copytree(configs_src, config.legacy_conf_path, dirs_exist_ok=True)
|
||||
|
||||
if config.config_file_path.exists():
|
||||
config_from_file = load_and_migrate_config(config.config_file_path)
|
||||
# Clobbering here will overwrite any settings that were set via environment variables
|
||||
config.update_config(config_from_file, clobber=False)
|
||||
else:
|
||||
# We should never write env vars to the config file
|
||||
default_config = DefaultInvokeAIAppConfig()
|
||||
default_config.write_file(config.config_file_path, as_example=False)
|
||||
|
||||
return config
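A brief, hedged usage note for the accessor above (the import path is assumed from this module's location in the repository):

    from invokeai.app.services.config.config_default import get_config  # assumed import path

    config = get_config()
    print(config.models_path)  # paths are resolved against the runtime root
    get_config.cache_clear()   # lru_cache(maxsize=1): tests can reset the cached singleton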
|
||||
|
@ -87,6 +87,8 @@ class DownloadQueueService(DownloadQueueServiceBase):
|
||||
self._queue.queue.clear()
|
||||
self.join() # wait for all active jobs to finish
|
||||
self._stop_event.set()
|
||||
for thread in self._worker_pool:
|
||||
thread.join()
|
||||
self._worker_pool.clear()
|
||||
|
||||
def submit_download_job(
|
||||
|
@ -12,6 +12,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
)
|
||||
from invokeai.app.util.misc import get_timestamp
|
||||
from invokeai.backend.model_manager import AnyModelConfig
|
||||
from invokeai.backend.model_manager.config import SubModelType
|
||||
|
||||
|
||||
class EventServiceBase:
|
||||
@ -80,7 +81,7 @@ class EventServiceBase:
|
||||
"graph_execution_state_id": graph_execution_state_id,
|
||||
"node_id": node_id,
|
||||
"source_node_id": source_node_id,
|
||||
"progress_image": progress_image.model_dump() if progress_image is not None else None,
|
||||
"progress_image": progress_image.model_dump(mode="json") if progress_image is not None else None,
|
||||
"step": step,
|
||||
"order": order,
|
||||
"total_steps": total_steps,
|
||||
@ -180,6 +181,7 @@ class EventServiceBase:
|
||||
queue_batch_id: str,
|
||||
graph_execution_state_id: str,
|
||||
model_config: AnyModelConfig,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
) -> None:
|
||||
"""Emitted when a model is requested"""
|
||||
self.__emit_queue_event(
|
||||
@ -189,7 +191,8 @@ class EventServiceBase:
|
||||
"queue_item_id": queue_item_id,
|
||||
"queue_batch_id": queue_batch_id,
|
||||
"graph_execution_state_id": graph_execution_state_id,
|
||||
"model_config": model_config.model_dump(),
|
||||
"model_config": model_config.model_dump(mode="json"),
|
||||
"submodel_type": submodel_type,
|
||||
},
|
||||
)
|
||||
|
||||
@ -200,6 +203,7 @@ class EventServiceBase:
|
||||
queue_batch_id: str,
|
||||
graph_execution_state_id: str,
|
||||
model_config: AnyModelConfig,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
) -> None:
|
||||
"""Emitted when a model is correctly loaded (returns model info)"""
|
||||
self.__emit_queue_event(
|
||||
@ -209,7 +213,8 @@ class EventServiceBase:
|
||||
"queue_item_id": queue_item_id,
|
||||
"queue_batch_id": queue_batch_id,
|
||||
"graph_execution_state_id": graph_execution_state_id,
|
||||
"model_config": model_config.model_dump(),
|
||||
"model_config": model_config.model_dump(mode="json"),
|
||||
"submodel_type": submodel_type,
|
||||
},
|
||||
)
|
||||
|
||||
@ -254,8 +259,8 @@ class EventServiceBase:
|
||||
"started_at": str(session_queue_item.started_at) if session_queue_item.started_at else None,
|
||||
"completed_at": str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
|
||||
},
|
||||
"batch_status": batch_status.model_dump(),
|
||||
"queue_status": queue_status.model_dump(),
|
||||
"batch_status": batch_status.model_dump(mode="json"),
|
||||
"queue_status": queue_status.model_dump(mode="json"),
|
||||
},
|
||||
)
|
||||
|
||||
@ -381,6 +386,17 @@ class EventServiceBase:
|
||||
},
|
||||
)
|
||||
|
||||
def emit_model_install_downloads_done(self, source: str) -> None:
|
||||
"""
|
||||
Emit once when all parts are downloaded, but before the probing and registration start.
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_downloads_done",
|
||||
payload={"source": source},
|
||||
)
|
||||
|
||||
def emit_model_install_running(self, source: str) -> None:
|
||||
"""
|
||||
Emit once when an install job becomes active.
|
||||
@ -405,7 +421,7 @@ class EventServiceBase:
|
||||
payload={"source": source, "total_bytes": total_bytes, "key": key, "id": id},
|
||||
)
|
||||
|
||||
def emit_model_install_cancelled(self, source: str) -> None:
|
||||
def emit_model_install_cancelled(self, source: str, id: int) -> None:
|
||||
"""
|
||||
Emit when an install job is cancelled.
|
||||
|
||||
@ -413,7 +429,7 @@ class EventServiceBase:
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_cancelled",
|
||||
payload={"source": source},
|
||||
payload={"source": source, "id": id},
|
||||
)
|
||||
|
||||
def emit_model_install_error(self, source: str, error_type: str, error: str, id: int) -> None:
|
||||
|
@ -82,7 +82,7 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
image_path,
|
||||
"PNG",
|
||||
pnginfo=pnginfo,
|
||||
compress_level=self.__invoker.services.configuration.png_compress_level,
|
||||
compress_level=self.__invoker.services.configuration.pil_compress_level,
|
||||
)
|
||||
|
||||
thumbnail_name = get_thumbnail_name(image_name)
|
||||
|
@ -41,8 +41,9 @@ class InvocationCacheBase(ABC):
|
||||
"""Clears the cache"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def create_key(self, invocation: BaseInvocation) -> int:
|
||||
def create_key(invocation: BaseInvocation) -> int:
|
||||
"""Gets the key for the invocation's cache item"""
|
||||
pass
|
||||
|
||||
|
@ -61,9 +61,7 @@ class MemoryInvocationCache(InvocationCacheBase):
|
||||
self._delete_oldest_access(number_to_delete)
|
||||
self._cache[key] = CachedItem(
|
||||
invocation_output,
|
||||
invocation_output.model_dump_json(
|
||||
warnings=False, exclude_defaults=True, exclude_unset=True, include={"type"}
|
||||
),
|
||||
invocation_output.model_dump_json(warnings=False, exclude_defaults=True, exclude_unset=True),
|
||||
)
|
||||
|
||||
def _delete_oldest_access(self, number_to_delete: int) -> None:
|
||||
@ -81,7 +79,7 @@ class MemoryInvocationCache(InvocationCacheBase):
|
||||
with self._lock:
|
||||
return self._delete(key)
|
||||
|
||||
def clear(self, *args, **kwargs) -> None:
|
||||
def clear(self) -> None:
|
||||
with self._lock:
|
||||
if self._max_cache_size == 0:
|
||||
return
|
||||
|
@ -25,6 +25,7 @@ if TYPE_CHECKING:
|
||||
from .images.images_base import ImageServiceABC
|
||||
from .invocation_cache.invocation_cache_base import InvocationCacheBase
|
||||
from .invocation_stats.invocation_stats_base import InvocationStatsServiceBase
|
||||
from .model_images.model_images_base import ModelImageFileStorageBase
|
||||
from .model_manager.model_manager_base import ModelManagerServiceBase
|
||||
from .names.names_base import NameServiceBase
|
||||
from .session_processor.session_processor_base import SessionProcessorBase
|
||||
@ -49,6 +50,7 @@ class InvocationServices:
|
||||
image_files: "ImageFileStorageBase",
|
||||
image_records: "ImageRecordStorageBase",
|
||||
logger: "Logger",
|
||||
model_images: "ModelImageFileStorageBase",
|
||||
model_manager: "ModelManagerServiceBase",
|
||||
download_queue: "DownloadQueueServiceBase",
|
||||
performance_statistics: "InvocationStatsServiceBase",
|
||||
@ -72,6 +74,7 @@ class InvocationServices:
|
||||
self.image_files = image_files
|
||||
self.image_records = image_records
|
||||
self.logger = logger
|
||||
self.model_images = model_images
|
||||
self.model_manager = model_manager
|
||||
self.download_queue = download_queue
|
||||
self.performance_statistics = performance_statistics
|
||||
|
33
invokeai/app/services/model_images/model_images_base.py
Normal file
@ -0,0 +1,33 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from pathlib import Path
|
||||
|
||||
from PIL.Image import Image as PILImageType
|
||||
|
||||
|
||||
class ModelImageFileStorageBase(ABC):
|
||||
"""Low-level service responsible for storing and retrieving image files."""
|
||||
|
||||
@abstractmethod
|
||||
def get(self, model_key: str) -> PILImageType:
|
||||
"""Retrieves a model image as PIL Image."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_path(self, model_key: str) -> Path:
|
||||
"""Gets the internal path to a model image."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_url(self, model_key: str) -> str | None:
|
||||
"""Gets the URL to fetch a model image."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def save(self, image: PILImageType, model_key: str) -> None:
|
||||
"""Saves a model image."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete(self, model_key: str) -> None:
|
||||
"""Deletes a model image."""
|
||||
pass
|
20
invokeai/app/services/model_images/model_images_common.py
Normal file
@ -0,0 +1,20 @@
|
||||
# TODO: Should these exceptions subclass existing python exceptions?
|
||||
class ModelImageFileNotFoundException(Exception):
|
||||
"""Raised when an image file is not found in storage."""
|
||||
|
||||
def __init__(self, message="Model image file not found"):
|
||||
super().__init__(message)
|
||||
|
||||
|
||||
class ModelImageFileSaveException(Exception):
|
||||
"""Raised when an image cannot be saved."""
|
||||
|
||||
def __init__(self, message="Model image file not saved"):
|
||||
super().__init__(message)
|
||||
|
||||
|
||||
class ModelImageFileDeleteException(Exception):
|
||||
"""Raised when an image cannot be deleted."""
|
||||
|
||||
def __init__(self, message="Model image file not deleted"):
|
||||
super().__init__(message)
|
85
invokeai/app/services/model_images/model_images_default.py
Normal file
@ -0,0 +1,85 @@
|
||||
from pathlib import Path
|
||||
|
||||
from PIL import Image
|
||||
from PIL.Image import Image as PILImageType
|
||||
from send2trash import send2trash
|
||||
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.util.misc import uuid_string
|
||||
from invokeai.app.util.thumbnails import make_thumbnail
|
||||
|
||||
from .model_images_base import ModelImageFileStorageBase
|
||||
from .model_images_common import (
|
||||
ModelImageFileDeleteException,
|
||||
ModelImageFileNotFoundException,
|
||||
ModelImageFileSaveException,
|
||||
)
|
||||
|
||||
|
||||
class ModelImageFileStorageDisk(ModelImageFileStorageBase):
|
||||
"""Stores images on disk"""
|
||||
|
||||
def __init__(self, model_images_folder: Path):
|
||||
self._model_images_folder = model_images_folder
|
||||
self._validate_storage_folders()
|
||||
|
||||
def start(self, invoker: Invoker) -> None:
|
||||
self._invoker = invoker
|
||||
|
||||
def get(self, model_key: str) -> PILImageType:
|
||||
try:
|
||||
path = self.get_path(model_key)
|
||||
|
||||
if not self._validate_path(path):
|
||||
raise ModelImageFileNotFoundException
|
||||
|
||||
return Image.open(path)
|
||||
except FileNotFoundError as e:
|
||||
raise ModelImageFileNotFoundException from e
|
||||
|
||||
def save(self, image: PILImageType, model_key: str) -> None:
|
||||
try:
|
||||
self._validate_storage_folders()
|
||||
image_path = self._model_images_folder / (model_key + ".webp")
|
||||
thumbnail = make_thumbnail(image, 256)
|
||||
thumbnail.save(image_path, format="webp")
|
||||
|
||||
except Exception as e:
|
||||
raise ModelImageFileSaveException from e
|
||||
|
||||
def get_path(self, model_key: str) -> Path:
|
||||
path = self._model_images_folder / (model_key + ".webp")
|
||||
|
||||
return path
|
||||
|
||||
def get_url(self, model_key: str) -> str | None:
|
||||
path = self.get_path(model_key)
|
||||
if not self._validate_path(path):
|
||||
return
|
||||
|
||||
url = self._invoker.services.urls.get_model_image_url(model_key)
|
||||
|
||||
# The image URL never changes, so we must add a random query string to it to prevent caching
|
||||
url += f"?{uuid_string()}"
|
||||
|
||||
return url
|
||||
|
||||
def delete(self, model_key: str) -> None:
|
||||
try:
|
||||
path = self.get_path(model_key)
|
||||
|
||||
if not self._validate_path(path):
|
||||
raise ModelImageFileNotFoundException
|
||||
|
||||
send2trash(path)
|
||||
|
||||
except Exception as e:
|
||||
raise ModelImageFileDeleteException from e
|
||||
|
||||
def _validate_path(self, path: Path) -> bool:
|
||||
"""Validates the path given for an image."""
|
||||
return path.exists()
|
||||
|
||||
def _validate_storage_folders(self) -> None:
|
||||
"""Checks if the required folders exist and create them if they don't"""
|
||||
self._model_images_folder.mkdir(parents=True, exist_ok=True)
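A minimal usage sketch of the disk storage service defined above; the folder and model key are illustrative, and the invoker set in `start()` is only needed for `get_url`:

    from pathlib import Path

    from PIL import Image

    storage = ModelImageFileStorageDisk(Path("/tmp/model_images"))
    storage.save(Image.new("RGB", (256, 256)), model_key="abc123")  # stored as a 256px webp thumbnail
    print(storage.get_path("abc123"))                               # .../model_images/abc123.webp
    storage.delete("abc123")                                        # sends the file to trash via send2trash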
|
@ -1,7 +1,6 @@
|
||||
"""Initialization file for model install service package."""
|
||||
|
||||
from .model_install_base import (
|
||||
CivitaiModelSource,
|
||||
HFModelSource,
|
||||
InstallStatus,
|
||||
LocalModelSource,
|
||||
@ -23,5 +22,4 @@ __all__ = [
|
||||
"LocalModelSource",
|
||||
"HFModelSource",
|
||||
"URLModelSource",
|
||||
"CivitaiModelSource",
|
||||
]
|
||||
|
@ -91,21 +91,6 @@ class LocalModelSource(StringLikeSource):
|
||||
return Path(self.path).as_posix()
|
||||
|
||||
|
||||
class CivitaiModelSource(StringLikeSource):
|
||||
"""A Civitai version id, with optional variant and access token."""
|
||||
|
||||
version_id: int
|
||||
variant: Optional[ModelRepoVariant] = None
|
||||
access_token: Optional[str] = None
|
||||
type: Literal["civitai"] = "civitai"
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of repoid when string rep needed."""
|
||||
base: str = str(self.version_id)
|
||||
base += f" ({self.variant})" if self.variant else ""
|
||||
return base
|
||||
|
||||
|
||||
class HFModelSource(StringLikeSource):
|
||||
"""
|
||||
A HuggingFace repo_id with optional variant, sub-folder and access token.
|
||||
@ -129,8 +114,10 @@ class HFModelSource(StringLikeSource):
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of repoid when string rep needed."""
|
||||
base: str = self.repo_id
|
||||
base += f":{self.variant or ''}"
|
||||
base += f":{self.subfolder}" if self.subfolder else ""
|
||||
if self.variant:
|
||||
base += f":{self.variant or ''}"
|
||||
if self.subfolder:
|
||||
base += f":{self.subfolder}"
|
||||
return base
|
||||
|
||||
|
||||
@ -146,14 +133,11 @@ class URLModelSource(StringLikeSource):
|
||||
return str(self.url)
|
||||
|
||||
|
||||
ModelSource = Annotated[
|
||||
Union[LocalModelSource, HFModelSource, CivitaiModelSource, URLModelSource], Field(discriminator="type")
|
||||
]
|
||||
ModelSource = Annotated[Union[LocalModelSource, HFModelSource, URLModelSource], Field(discriminator="type")]
|
||||
|
||||
MODEL_SOURCE_TO_TYPE_MAP = {
|
||||
URLModelSource: ModelSourceType.Url,
|
||||
HFModelSource: ModelSourceType.HFRepoID,
|
||||
CivitaiModelSource: ModelSourceType.CivitAI,
|
||||
LocalModelSource: ModelSourceType.Path,
|
||||
}
|
||||
|
||||
|
@ -11,6 +11,7 @@ from shutil import copyfile, copytree, move, rmtree
|
||||
from tempfile import mkdtemp
|
||||
from typing import Any, Dict, List, Optional, Set, Union
|
||||
|
||||
import yaml
|
||||
from huggingface_hub import HfFolder
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
from requests import Session
|
||||
@ -21,7 +22,6 @@ from invokeai.app.services.events.events_base import EventServiceBase
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase
|
||||
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
|
||||
from invokeai.app.util.misc import uuid_string
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
@ -33,12 +33,12 @@ from invokeai.backend.model_manager.config import (
|
||||
)
|
||||
from invokeai.backend.model_manager.metadata import (
|
||||
AnyModelRepoMetadata,
|
||||
CivitaiMetadataFetch,
|
||||
HuggingFaceMetadataFetch,
|
||||
ModelMetadataFetchBase,
|
||||
ModelMetadataWithFiles,
|
||||
RemoteModelFile,
|
||||
)
|
||||
from invokeai.backend.model_manager.metadata.metadata_base import CivitaiMetadata, HuggingFaceMetadata
|
||||
from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMetadata
|
||||
from invokeai.backend.model_manager.probe import ModelProbe
|
||||
from invokeai.backend.model_manager.search import ModelSearch
|
||||
from invokeai.backend.util import Chdir, InvokeAILogger
|
||||
@ -46,7 +46,6 @@ from invokeai.backend.util.devices import choose_precision, choose_torch_device
|
||||
|
||||
from .model_install_base import (
|
||||
MODEL_SOURCE_TO_TYPE_MAP,
|
||||
CivitaiModelSource,
|
||||
HFModelSource,
|
||||
InstallStatus,
|
||||
LocalModelSource,
|
||||
@ -94,6 +93,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._download_cache: Dict[AnyHttpUrl, ModelInstallJob] = {}
|
||||
self._running = False
|
||||
self._session = session
|
||||
self._install_thread: Optional[threading.Thread] = None
|
||||
self._next_job_id = 0
|
||||
|
||||
@property
|
||||
@ -117,6 +117,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
raise Exception("Attempt to start the installer service twice")
|
||||
self._start_installer_thread()
|
||||
self._remove_dangling_install_dirs()
|
||||
self._migrate_yaml()
|
||||
self.sync_to_config()
|
||||
|
||||
def stop(self, invoker: Optional[Invoker] = None) -> None:
|
||||
@ -125,15 +126,30 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
if not self._running:
|
||||
raise Exception("Attempt to stop the install service before it was started")
|
||||
self._stop_event.set()
|
||||
with self._install_queue.mutex:
|
||||
self._install_queue.queue.clear() # get rid of pending jobs
|
||||
active_jobs = [x for x in self.list_jobs() if x.running]
|
||||
if active_jobs:
|
||||
self._logger.warning("Waiting for active install job to complete")
|
||||
self.wait_for_installs()
|
||||
self._clear_pending_jobs()
|
||||
self._download_cache.clear()
|
||||
assert self._install_thread is not None
|
||||
self._install_thread.join()
|
||||
self._running = False
|
||||
|
||||
def _clear_pending_jobs(self) -> None:
|
||||
for job in self.list_jobs():
|
||||
if not job.in_terminal_state:
|
||||
self._logger.warning("Cancelling job {job.id}")
|
||||
self.cancel_job(job)
|
||||
while True:
|
||||
try:
|
||||
job = self._install_queue.get(block=False)
|
||||
self._install_queue.task_done()
|
||||
except Empty:
|
||||
break
|
||||
|
||||
def _put_in_queue(self, job: ModelInstallJob) -> None:
|
||||
if self._stop_event.is_set():
|
||||
self.cancel_job(job)
|
||||
else:
|
||||
self._install_queue.put(job)
|
||||
|
||||
def register_path(
|
||||
self,
|
||||
model_path: Union[Path, str],
|
||||
@ -154,10 +170,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
model_path = Path(model_path)
|
||||
config = config or {}
|
||||
|
||||
if self._app_config.skip_model_hash:
|
||||
config["hash"] = uuid_string()
|
||||
|
||||
info: AnyModelConfig = ModelProbe.probe(Path(model_path), config)
|
||||
info: AnyModelConfig = ModelProbe.probe(Path(model_path), config, hash_algo=self._app_config.hashing_algorithm)
|
||||
|
||||
if preferred_name := config.get("name"):
|
||||
preferred_name = Path(preferred_name).with_suffix(model_path.suffix)
|
||||
@ -199,9 +212,16 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
access_token=access_token,
|
||||
)
|
||||
elif re.match(r"^https?://[^/]+", source):
|
||||
# Pull the token from config if it exists and matches the URL
|
||||
_token = access_token
|
||||
if _token is None:
|
||||
for pair in self.app_config.remote_api_tokens or []:
|
||||
if re.search(pair.url_regex, source):
|
||||
_token = pair.token
|
||||
break
|
||||
source_obj = URLModelSource(
|
||||
url=AnyHttpUrl(source),
|
||||
access_token=access_token,
|
||||
access_token=_token,
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unsupported model source: '{source}'")
|
||||
@ -215,9 +235,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
|
||||
if isinstance(source, LocalModelSource):
|
||||
install_job = self._import_local_model(source, config)
|
||||
self._install_queue.put(install_job) # synchronously install
|
||||
elif isinstance(source, CivitaiModelSource):
|
||||
install_job = self._import_from_civitai(source, config)
|
||||
self._put_in_queue(install_job) # synchronously install
|
||||
elif isinstance(source, HFModelSource):
|
||||
install_job = self._import_from_hf(source, config)
|
||||
elif isinstance(source, URLModelSource):
|
||||
@ -252,7 +270,6 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
raise TimeoutError("Timeout exceeded")
|
||||
return job
|
||||
|
||||
# TODO: Better name? Maybe wait_for_jobs()? Maybe too easily confused with above
|
||||
def wait_for_installs(self, timeout: int = 0) -> List[ModelInstallJob]: # noqa D102
|
||||
"""Block until all installation jobs are done."""
|
||||
start = time.time()
|
||||
@ -262,6 +279,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
if timeout > 0 and time.time() - start > timeout:
|
||||
raise TimeoutError("Timeout exceeded")
|
||||
self._install_queue.join()
|
||||
|
||||
return self._install_jobs
|
||||
|
||||
def cancel_job(self, job: ModelInstallJob) -> None:
|
||||
@ -278,16 +296,67 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
def sync_to_config(self) -> None:
|
||||
"""Synchronize models on disk to those in the config record store database."""
|
||||
self._scan_models_directory()
|
||||
if autoimport := self._app_config.autoimport_dir:
|
||||
if self._app_config.autoimport_path:
|
||||
self._logger.info("Scanning autoimport directory for new models")
|
||||
installed = self.scan_directory(self._app_config.root_path / autoimport)
|
||||
installed = self.scan_directory(self._app_config.autoimport_path)
|
||||
self._logger.info(f"{len(installed)} new models registered")
|
||||
self._logger.info("Model installer (re)initialized")
|
||||
|
||||
def _migrate_yaml(self) -> None:
|
||||
db_models = self.record_store.all_models()
|
||||
|
||||
legacy_models_yaml_path = (
|
||||
self._app_config.legacy_models_yaml_path or self._app_config.root_path / "configs" / "models.yaml"
|
||||
)
|
||||
|
||||
# The old path may be relative to the root path
|
||||
if not legacy_models_yaml_path.exists():
|
||||
legacy_models_yaml_path = Path(self._app_config.root_path, legacy_models_yaml_path)
|
||||
|
||||
if legacy_models_yaml_path.exists():
|
||||
legacy_models_yaml = yaml.safe_load(legacy_models_yaml_path.read_text())
|
||||
|
||||
yaml_metadata = legacy_models_yaml.pop("__metadata__")
|
||||
yaml_version = yaml_metadata.get("version")
|
||||
|
||||
if yaml_version != "3.0.0":
|
||||
raise ValueError(
|
||||
f"Attempted migration of unsupported `models.yaml` v{yaml_version}. Only v3.0.0 is supported. Exiting."
|
||||
)
|
||||
|
||||
self._logger.info(
|
||||
f"Starting one-time migration of {len(legacy_models_yaml.items())} models from {str(legacy_models_yaml_path)}. This may take a few minutes."
|
||||
)
|
||||
|
||||
if len(db_models) == 0 and len(legacy_models_yaml.items()) != 0:
|
||||
for model_key, stanza in legacy_models_yaml.items():
|
||||
_, _, model_name = str(model_key).split("/")
|
||||
model_path = Path(stanza["path"])
|
||||
if not model_path.is_absolute():
|
||||
model_path = self._app_config.models_path / model_path
|
||||
model_path = model_path.resolve()
|
||||
|
||||
config: dict[str, Any] = {}
|
||||
config["name"] = model_name
|
||||
config["description"] = stanza.get("description")
|
||||
config["config_path"] = stanza.get("config")
|
||||
|
||||
try:
|
||||
id = self.register_path(model_path=model_path, config=config)
|
||||
self._logger.info(f"Migrated {model_name} with id {id}")
|
||||
except Exception as e:
|
||||
self._logger.warning(f"Model at {model_path} could not be migrated: {e}")
|
||||
|
||||
# Rename `models.yaml` to `models.yaml.bak` to prevent re-migration
|
||||
legacy_models_yaml_path.rename(legacy_models_yaml_path.with_suffix(".yaml.bak"))
|
||||
|
||||
# Unset the path - we are done with it either way
|
||||
self._app_config.legacy_models_yaml_path = None
|
||||
|
||||
def scan_directory(self, scan_dir: Path, install: bool = False) -> List[str]: # noqa D102
|
||||
self._cached_model_paths = {Path(x.path).absolute() for x in self.record_store.all_models()}
|
||||
self._cached_model_paths = {Path(x.path).resolve() for x in self.record_store.all_models()}
|
||||
callback = self._scan_install if install else self._scan_register
|
||||
search = ModelSearch(on_model_found=callback, config=self._app_config)
|
||||
search = ModelSearch(on_model_found=callback)
|
||||
self._models_installed.clear()
|
||||
search.search(scan_dir)
|
||||
return list(self._models_installed)
|
||||
@ -299,7 +368,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
"""Unregister the model. Delete its files only if they are within our models directory."""
|
||||
model = self.record_store.get_model(key)
|
||||
models_dir = self.app_config.models_path
|
||||
model_path = models_dir / model.path
|
||||
model_path = models_dir / Path(model.path) # handle legacy relative model paths
|
||||
if model_path.is_relative_to(models_dir):
|
||||
self.unconditionally_delete(key)
|
||||
else:
|
||||
@ -307,11 +376,11 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
|
||||
def unconditionally_delete(self, key: str) -> None: # noqa D102
|
||||
model = self.record_store.get_model(key)
|
||||
path = self.app_config.models_path / model.path
|
||||
if path.is_dir():
|
||||
rmtree(path)
|
||||
model_path = self.app_config.models_path / model.path
|
||||
if model_path.is_dir():
|
||||
rmtree(model_path)
|
||||
else:
|
||||
path.unlink()
|
||||
model_path.unlink()
|
||||
self.unregister(key)
|
||||
|
||||
def download_and_cache(
|
||||
@ -322,7 +391,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
) -> Path:
|
||||
"""Download the model file located at source to the models cache and return its Path."""
|
||||
model_hash = sha256(str(source).encode("utf-8")).hexdigest()[0:32]
|
||||
model_path = self._app_config.models_convert_cache_path / model_hash
|
||||
model_path = self._app_config.convert_cache_path / model_hash
|
||||
|
||||
# We expect the cache directory to contain one and only one downloaded file.
|
||||
# We don't know the file's name in advance, as it is set by the download
|
||||
@ -350,20 +419,20 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
# Internal functions that manage the installer threads
|
||||
# --------------------------------------------------------------------------------------------
|
||||
def _start_installer_thread(self) -> None:
|
||||
threading.Thread(target=self._install_next_item, daemon=True).start()
|
||||
self._install_thread = threading.Thread(target=self._install_next_item, daemon=True)
|
||||
self._install_thread.start()
|
||||
self._running = True
|
||||
|
||||
def _install_next_item(self) -> None:
|
||||
done = False
|
||||
while not done:
|
||||
self._logger.debug(f"Installer thread {threading.get_ident()} starting")
|
||||
while True:
|
||||
if self._stop_event.is_set():
|
||||
done = True
|
||||
continue
|
||||
break
|
||||
self._logger.debug(f"Installer thread {threading.get_ident()} polling")
|
||||
try:
|
||||
job = self._install_queue.get(timeout=1)
|
||||
except Empty:
|
||||
continue
|
||||
|
||||
assert job.local_path is not None
|
||||
try:
|
||||
if job.cancelled:
|
||||
@ -372,41 +441,14 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
elif job.errored:
|
||||
self._signal_job_errored(job)
|
||||
|
||||
elif (
|
||||
job.waiting or job.downloads_done
|
||||
): # local jobs will be in waiting state, remote jobs will be downloading state
|
||||
job.total_bytes = self._stat_size(job.local_path)
|
||||
job.bytes = job.total_bytes
|
||||
self._signal_job_running(job)
|
||||
job.config_in["source"] = str(job.source)
|
||||
job.config_in["source_type"] = MODEL_SOURCE_TO_TYPE_MAP[job.source.__class__]
|
||||
# enter the metadata, if there is any
|
||||
if isinstance(job.source_metadata, (CivitaiMetadata, HuggingFaceMetadata)):
|
||||
job.config_in["source_api_response"] = job.source_metadata.api_response
|
||||
if isinstance(job.source_metadata, CivitaiMetadata) and job.source_metadata.trigger_phrases:
|
||||
job.config_in["trigger_phrases"] = job.source_metadata.trigger_phrases
|
||||
|
||||
if job.inplace:
|
||||
key = self.register_path(job.local_path, job.config_in)
|
||||
else:
|
||||
key = self.install_path(job.local_path, job.config_in)
|
||||
job.config_out = self.record_store.get_model(key)
|
||||
self._signal_job_completed(job)
|
||||
elif job.waiting or job.downloads_done:
|
||||
self._register_or_install(job)
|
||||
|
||||
except InvalidModelConfigException as excp:
|
||||
if any(x.content_type is not None and "text/html" in x.content_type for x in job.download_parts):
|
||||
job.set_error(
|
||||
InvalidModelConfigException(
|
||||
f"At least one file in {job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
|
||||
)
|
||||
)
|
||||
else:
|
||||
job.set_error(excp)
|
||||
self._signal_job_errored(job)
|
||||
self._set_error(job, excp)
|
||||
|
||||
except (OSError, DuplicateModelException) as excp:
|
||||
job.set_error(excp)
|
||||
self._signal_job_errored(job)
|
||||
self._set_error(job, excp)
|
||||
|
||||
finally:
|
||||
# if this is an install of a remote file, then clean up the temporary directory
|
||||
@ -414,8 +456,36 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
rmtree(job._install_tmpdir)
|
||||
self._install_completed_event.set()
|
||||
self._install_queue.task_done()
|
||||
self._logger.info(f"Installer thread {threading.get_ident()} exiting")
|
||||
|
||||
self._logger.info("Install thread exiting")
|
||||
def _register_or_install(self, job: ModelInstallJob) -> None:
|
||||
# local jobs will be in waiting state, remote jobs will be downloading state
|
||||
job.total_bytes = self._stat_size(job.local_path)
|
||||
job.bytes = job.total_bytes
|
||||
self._signal_job_running(job)
|
||||
job.config_in["source"] = str(job.source)
|
||||
job.config_in["source_type"] = MODEL_SOURCE_TO_TYPE_MAP[job.source.__class__]
|
||||
# enter the metadata, if there is any
|
||||
if isinstance(job.source_metadata, (HuggingFaceMetadata)):
|
||||
job.config_in["source_api_response"] = job.source_metadata.api_response
|
||||
|
||||
if job.inplace:
|
||||
key = self.register_path(job.local_path, job.config_in)
|
||||
else:
|
||||
key = self.install_path(job.local_path, job.config_in)
|
||||
job.config_out = self.record_store.get_model(key)
|
||||
self._signal_job_completed(job)
|
||||
|
||||
def _set_error(self, job: ModelInstallJob, excp: Exception) -> None:
|
||||
if any(x.content_type is not None and "text/html" in x.content_type for x in job.download_parts):
|
||||
job.set_error(
|
||||
InvalidModelConfigException(
|
||||
f"At least one file in {job.local_path} is an HTML page, not a model. This can happen when an access token is required to download."
|
||||
)
|
||||
)
|
||||
else:
|
||||
job.set_error(excp)
|
||||
self._signal_job_errored(job)
|
||||
|
||||
# --------------------------------------------------------------------------------------------
|
||||
# Internal functions that manage the models directory
|
||||
@ -450,7 +520,9 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._logger.info(f"Scanning {self._app_config.models_path} for new and orphaned models")
|
||||
for cur_base_model in BaseModelType:
|
||||
for cur_model_type in ModelType:
|
||||
models_dir = Path(cur_base_model.value, cur_model_type.value)
|
||||
models_dir = self._app_config.models_path / Path(cur_base_model.value, cur_model_type.value)
|
||||
if not models_dir.exists():
|
||||
continue
|
||||
installed.update(self.scan_directory(models_dir))
|
||||
self._logger.info(f"{len(installed)} new models registered; {len(defunct_models)} unregistered")
|
||||
|
||||
@ -469,13 +541,20 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
old_path = Path(model.path)
|
||||
models_dir = self.app_config.models_path
|
||||
|
||||
if not old_path.is_relative_to(models_dir):
|
||||
try:
|
||||
old_path.relative_to(models_dir)
|
||||
return model
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
new_path = models_dir / model.base.value / model.type.value / old_path.name
|
||||
|
||||
if old_path == new_path or new_path.exists() and old_path == new_path.resolve():
|
||||
return model
|
||||
|
||||
new_path = models_dir / model.base.value / model.type.value / model.name
|
||||
self._logger.info(f"Moving {model.name} to {new_path}.")
|
||||
new_path = self._move_model(old_path, new_path)
|
||||
model.path = new_path.relative_to(models_dir).as_posix()
|
||||
model.path = new_path.as_posix()
|
||||
self.record_store.update_model(key, ModelRecordChanges(path=model.path))
|
||||
return model
|
||||
|
||||
@ -533,22 +612,16 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
) -> str:
|
||||
config = config or {}
|
||||
|
||||
if self._app_config.skip_model_hash:
|
||||
config["hash"] = uuid_string()
|
||||
info = info or ModelProbe.probe(model_path, config, hash_algo=self._app_config.hashing_algorithm)
|
||||
|
||||
info = info or ModelProbe.probe(model_path, config)
|
||||
|
||||
model_path = model_path.absolute()
|
||||
if model_path.is_relative_to(self.app_config.models_path):
|
||||
model_path = model_path.relative_to(self.app_config.models_path)
|
||||
model_path = model_path.resolve()
|
||||
|
||||
info.path = model_path.as_posix()
|
||||
|
||||
# add 'main' specific fields
|
||||
# Checkpoints have a config file needed for conversion - resolve this to an absolute path
|
||||
if isinstance(info, CheckpointConfigBase):
|
||||
# make config relative to our root
|
||||
legacy_conf = (self.app_config.root_dir / self.app_config.legacy_conf_dir / info.config_path).resolve()
|
||||
info.config_path = legacy_conf.relative_to(self.app_config.root_dir).as_posix()
|
||||
legacy_conf = (self.app_config.legacy_conf_path / info.config_path).resolve()
|
||||
info.config_path = legacy_conf.as_posix()
|
||||
self.record_store.add_model(info)
|
||||
return info.key
|
||||
|
||||
@ -573,16 +646,6 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
inplace=source.inplace or False,
|
||||
)
|
||||
|
||||
def _import_from_civitai(self, source: CivitaiModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||
if not source.access_token:
|
||||
self._logger.info("No Civitai access token provided; some models may not be downloadable.")
|
||||
metadata = CivitaiMetadataFetch(self._session, self.app_config.get_config().civitai_api_key).from_id(
|
||||
str(source.version_id)
|
||||
)
|
||||
assert isinstance(metadata, ModelMetadataWithFiles)
|
||||
remote_files = metadata.download_urls(session=self._session)
|
||||
return self._import_remote_model(source=source, config=config, metadata=metadata, remote_files=remote_files)
|
||||
|
||||
def _import_from_hf(self, source: HFModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||
# Add user's cached access token to HuggingFace requests
|
||||
source.access_token = source.access_token or HfFolder.get_token()
|
||||
@ -605,7 +668,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
)
|
||||
|
||||
def _import_from_url(self, source: URLModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||
# URLs from Civitai or HuggingFace will be handled specially
|
||||
# URLs from HuggingFace will be handled specially
|
||||
metadata = None
|
||||
fetcher = None
|
||||
try:
|
||||
@ -613,8 +676,6 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
except ValueError:
|
||||
pass
|
||||
kwargs: dict[str, Any] = {"session": self._session}
|
||||
if fetcher is CivitaiMetadataFetch:
|
||||
kwargs["api_key"] = self._app_config.get_config().civitai_api_key
|
||||
if fetcher is not None:
|
||||
metadata = fetcher(**kwargs).from_url(source.url)
|
||||
self._logger.debug(f"metadata={metadata}")
|
||||
@ -631,7 +692,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
|
||||
def _import_remote_model(
|
||||
self,
|
||||
source: HFModelSource | CivitaiModelSource | URLModelSource,
|
||||
source: HFModelSource | URLModelSource,
|
||||
remote_files: List[RemoteModelFile],
|
||||
metadata: Optional[AnyModelRepoMetadata],
|
||||
config: Optional[Dict[str, Any]],
|
||||
@ -671,12 +732,13 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
install_job._install_tmpdir = tmpdir
|
||||
assert install_job.total_bytes is not None # to avoid type checking complaints in the loop below
|
||||
|
||||
self._logger.info(f"Queuing {source} for downloading")
|
||||
files_string = "file" if len(remote_files) == 1 else "files"
|
||||
self._logger.info(f"Queuing model install: {source} ({len(remote_files)} {files_string})")
|
||||
self._logger.debug(f"remote_files={remote_files}")
|
||||
for model_file in remote_files:
|
||||
url = model_file.url
|
||||
path = root / model_file.path.relative_to(subfolder)
|
||||
self._logger.info(f"Downloading {url} => {path}")
|
||||
self._logger.debug(f"Downloading {url} => {path}")
|
||||
install_job.total_bytes += model_file.size
|
||||
assert hasattr(source, "access_token")
|
||||
dest = tmpdir / path.parent
|
||||
@ -712,7 +774,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
# Callbacks are executed by the download queue in a separate thread
|
||||
# ------------------------------------------------------------------
|
||||
def _download_started_callback(self, download_job: DownloadJob) -> None:
|
||||
self._logger.info(f"{download_job.source}: model download started")
|
||||
self._logger.info(f"Model download started: {download_job.source}")
|
||||
with self._lock:
|
||||
install_job = self._download_cache[download_job.source]
|
||||
install_job.status = InstallStatus.DOWNLOADING
|
||||
@ -738,17 +800,17 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._signal_job_downloading(install_job)
|
||||
|
||||
def _download_complete_callback(self, download_job: DownloadJob) -> None:
|
||||
self._logger.info(f"{download_job.source}: model download complete")
|
||||
self._logger.info(f"Model download complete: {download_job.source}")
|
||||
with self._lock:
|
||||
install_job = self._download_cache[download_job.source]
|
||||
self._download_cache.pop(download_job.source, None)
|
||||
|
||||
# are there any more active jobs left in this task?
|
||||
if install_job.downloading and all(x.complete for x in install_job.download_parts):
|
||||
install_job.status = InstallStatus.DOWNLOADS_DONE
|
||||
self._install_queue.put(install_job)
|
||||
self._signal_job_downloads_done(install_job)
|
||||
self._put_in_queue(install_job)
|
||||
|
||||
# Let other threads know that the number of downloads has changed
|
||||
self._download_cache.pop(download_job.source, None)
|
||||
self._downloads_changed_event.set()
|
||||
|
||||
def _download_error_callback(self, download_job: DownloadJob, excp: Optional[Exception] = None) -> None:
|
||||
@ -771,7 +833,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
if not install_job:
|
||||
return
|
||||
self._downloads_changed_event.set()
|
||||
self._logger.warning(f"{download_job.source}: model download cancelled")
|
||||
self._logger.warning(f"Model download canceled: {download_job.source}")
|
||||
# if install job has already registered an error, then do not replace its status with cancelled
|
||||
if not install_job.errored:
|
||||
install_job.cancel()
|
||||
@ -788,14 +850,14 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
|
||||
if all(x.in_terminal_state for x in install_job.download_parts):
|
||||
# When all parts have reached their terminal state, we finalize the job to clean up the temporary directory and other resources
|
||||
self._install_queue.put(install_job)
|
||||
self._put_in_queue(install_job)
|
||||
|
||||
# ------------------------------------------------------------------------------------------------
|
||||
# Internal methods that put events on the event bus
|
||||
# ------------------------------------------------------------------------------------------------
|
||||
def _signal_job_running(self, job: ModelInstallJob) -> None:
|
||||
job.status = InstallStatus.RUNNING
|
||||
self._logger.info(f"{job.source}: model installation started")
|
||||
self._logger.info(f"Model install started: {job.source}")
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_model_install_running(str(job.source))
|
||||
|
||||
@ -821,12 +883,17 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
id=job.id,
|
||||
)
|
||||
|
||||
def _signal_job_downloads_done(self, job: ModelInstallJob) -> None:
|
||||
job.status = InstallStatus.DOWNLOADS_DONE
|
||||
self._logger.info(f"Model download complete: {job.source}")
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_model_install_downloads_done(str(job.source))
|
||||
|
||||
def _signal_job_completed(self, job: ModelInstallJob) -> None:
|
||||
job.status = InstallStatus.COMPLETED
|
||||
assert job.config_out
|
||||
self._logger.info(
|
||||
f"{job.source}: model installation completed. {job.local_path} registered key {job.config_out.key}"
|
||||
)
|
||||
self._logger.info(f"Model install complete: {job.source}")
|
||||
self._logger.debug(f"{job.local_path} registered key {job.config_out.key}")
|
||||
if self._event_bus:
|
||||
assert job.local_path is not None
|
||||
assert job.config_out is not None
|
||||
@ -834,7 +901,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._event_bus.emit_model_install_completed(str(job.source), key, id=job.id)
|
||||
|
||||
def _signal_job_errored(self, job: ModelInstallJob) -> None:
|
||||
self._logger.info(f"{job.source}: model installation encountered an exception: {job.error_type}\n{job.error}")
|
||||
self._logger.info(f"Model install error: {job.source}, {job.error_type}\n{job.error}")
|
||||
if self._event_bus:
|
||||
error_type = job.error_type
|
||||
error = job.error
|
||||
@ -843,14 +910,12 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._event_bus.emit_model_install_error(str(job.source), error_type, error, id=job.id)
|
||||
|
||||
def _signal_job_cancelled(self, job: ModelInstallJob) -> None:
|
||||
self._logger.info(f"{job.source}: model installation was cancelled")
|
||||
self._logger.info(f"Model install canceled: {job.source}")
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_model_install_cancelled(str(job.source))
|
||||
self._event_bus.emit_model_install_cancelled(str(job.source), id=job.id)
|
||||
|
||||
@staticmethod
|
||||
def get_fetcher_from_url(url: str):
|
||||
if re.match(r"^https?://civitai.com/", url.lower()):
|
||||
return CivitaiMetadataFetch
|
||||
elif re.match(r"^https?://huggingface.co/[^/]+/[^/]+$", url.lower()):
|
||||
def get_fetcher_from_url(url: str) -> ModelMetadataFetchBase:
|
||||
if re.match(r"^https?://huggingface.co/[^/]+/[^/]+$", url.lower()):
|
||||
return HuggingFaceMetadataFetch
|
||||
raise ValueError(f"Unsupported model source: '{url}'")
|
||||
|
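For orientation, a minimal usage sketch of the narrowed get_fetcher_from_url dispatch above; the session object, helper name, and error handling are assumptions, not part of this diff:
# Hypothetical caller (sketch only): resolve a metadata fetcher for a URL,
# falling back to a plain download when no fetcher matches.
import requests
from invokeai.app.services.model_install import ModelInstallService

def fetch_metadata_if_possible(url: str):
    session = requests.Session()
    try:
        fetcher = ModelInstallService.get_fetcher_from_url(url)
    except ValueError:
        return None  # unsupported source; the caller downloads the file directly
    return fetcher(session=session).from_url(url)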
@ -68,6 +68,7 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
self._emit_load_event(
|
||||
context_data=context_data,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
)
|
||||
|
||||
implementation, model_config, submodel_type = self._registry.get_implementation(model_config, submodel_type) # type: ignore
|
||||
@ -82,6 +83,7 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
self._emit_load_event(
|
||||
context_data=context_data,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
loaded=True,
|
||||
)
|
||||
return loaded_model
|
||||
@ -91,6 +93,7 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
context_data: InvocationContextData,
|
||||
model_config: AnyModelConfig,
|
||||
loaded: Optional[bool] = False,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
) -> None:
|
||||
if not self._invoker:
|
||||
return
|
||||
@ -102,6 +105,7 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
queue_batch_id=context_data.queue_item.batch_id,
|
||||
graph_execution_state_id=context_data.queue_item.session_id,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
)
|
||||
else:
|
||||
self._invoker.services.events.emit_model_load_completed(
|
||||
@ -110,4 +114,5 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
queue_batch_id=context_data.queue_item.batch_id,
|
||||
graph_execution_state_id=context_data.queue_item.session_id,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
)
|
||||
|
@ -1,15 +1,11 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Team
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from typing_extensions import Self
|
||||
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContextData
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelType, SubModelType
|
||||
from invokeai.backend.model_manager.load.load_base import LoadedModel
|
||||
|
||||
from ..config import InvokeAIAppConfig
|
||||
from ..download import DownloadQueueServiceBase
|
||||
@ -70,32 +66,3 @@ class ModelManagerServiceBase(ABC):
|
||||
@abstractmethod
|
||||
def stop(self, invoker: Invoker) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def load_model_by_config(
|
||||
self,
|
||||
model_config: AnyModelConfig,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def load_model_by_key(
|
||||
self,
|
||||
key: str,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def load_model_by_attr(
|
||||
self,
|
||||
model_name: str,
|
||||
base_model: BaseModelType,
|
||||
model_type: ModelType,
|
||||
submodel: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
pass
|
||||
|
@ -1,14 +1,10 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Team
|
||||
"""Implementation of ModelManagerServiceBase."""
|
||||
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from typing_extensions import Self
|
||||
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContextData
|
||||
from invokeai.backend.model_manager import AnyModelConfig, BaseModelType, LoadedModel, ModelType, SubModelType
|
||||
from invokeai.backend.model_manager.load import ModelCache, ModelConvertCache, ModelLoaderRegistry
|
||||
from invokeai.backend.util.devices import choose_torch_device
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
@ -18,7 +14,7 @@ from ..download import DownloadQueueServiceBase
|
||||
from ..events.events_base import EventServiceBase
|
||||
from ..model_install import ModelInstallService, ModelInstallServiceBase
|
||||
from ..model_load import ModelLoadService, ModelLoadServiceBase
|
||||
from ..model_records import ModelRecordServiceBase, UnknownModelException
|
||||
from ..model_records import ModelRecordServiceBase
|
||||
from .model_manager_base import ModelManagerServiceBase
|
||||
|
||||
|
||||
@ -64,56 +60,6 @@ class ModelManagerService(ModelManagerServiceBase):
|
||||
if hasattr(service, "stop"):
|
||||
service.stop(invoker)
|
||||
|
||||
def load_model_by_config(
|
||||
self,
|
||||
model_config: AnyModelConfig,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
return self.load.load_model(model_config, submodel_type, context_data)
|
||||
|
||||
def load_model_by_key(
|
||||
self,
|
||||
key: str,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
config = self.store.get_model(key)
|
||||
return self.load.load_model(config, submodel_type, context_data)
|
||||
|
||||
def load_model_by_attr(
|
||||
self,
|
||||
model_name: str,
|
||||
base_model: BaseModelType,
|
||||
model_type: ModelType,
|
||||
submodel: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
"""
|
||||
Given a model's attributes, search the database for it, and if found, load and return the LoadedModel object.
|
||||
|
||||
This is provided for API compatibility with the get_model() method
|
||||
in the original model manager. However, note that LoadedModel is
|
||||
not the same as the original ModelInfo that was returned.
|
||||
|
||||
:param model_name: Name of the model to be fetched.
|
||||
:param base_model: Base model
|
||||
:param model_type: Type of the model
|
||||
:param submodel: For main (pipeline models), the submodel to fetch
|
||||
:param context: The invocation context.
|
||||
|
||||
Exceptions: UnknownModelException -- model with this key not known
|
||||
NotImplementedException -- a model loader was not provided at initialization time
|
||||
ValueError -- more than one model matches this combination
|
||||
"""
|
||||
configs = self.store.search_by_attr(model_name, base_model, model_type)
|
||||
if len(configs) == 0:
|
||||
raise UnknownModelException(f"{base_model}/{model_type}/{model_name}: Unknown model")
|
||||
elif len(configs) > 1:
|
||||
raise ValueError(f"{base_model}/{model_type}/{model_name}: More than one model matches.")
|
||||
else:
|
||||
return self.load.load_model(configs[0], submodel, context_data)
|
||||
|
||||
@classmethod
|
||||
def build_model_manager(
|
||||
cls,
|
||||
@ -132,14 +78,12 @@ class ModelManagerService(ModelManagerServiceBase):
|
||||
logger.setLevel(app_config.log_level.upper())
|
||||
|
||||
ram_cache = ModelCache(
|
||||
max_cache_size=app_config.ram_cache_size,
|
||||
max_vram_cache_size=app_config.vram_cache_size,
|
||||
max_cache_size=app_config.ram,
|
||||
max_vram_cache_size=app_config.vram,
|
||||
logger=logger,
|
||||
execution_device=execution_device,
|
||||
)
|
||||
convert_cache = ModelConvertCache(
|
||||
cache_path=app_config.models_convert_cache_path, max_size=app_config.convert_cache_size
|
||||
)
|
||||
convert_cache = ModelConvertCache(cache_path=app_config.convert_cache_path, max_size=app_config.convert_cache)
|
||||
loader = ModelLoadService(
|
||||
app_config=app_config,
|
||||
ram_cache=ram_cache,
|
||||
|
@ -18,7 +18,12 @@ from invokeai.backend.model_manager import (
|
||||
ModelFormat,
|
||||
ModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.config import ModelDefaultSettings, ModelVariantType, SchedulerPredictionType
|
||||
from invokeai.backend.model_manager.config import (
|
||||
ControlAdapterDefaultSettings,
|
||||
MainModelDefaultSettings,
|
||||
ModelVariantType,
|
||||
SchedulerPredictionType,
|
||||
)
|
||||
|
||||
|
||||
class DuplicateModelException(Exception):
|
||||
@ -68,7 +73,7 @@ class ModelRecordChanges(BaseModelExcludeNull):
|
||||
description: Optional[str] = Field(description="Model description", default=None)
|
||||
base: Optional[BaseModelType] = Field(description="The base model.", default=None)
|
||||
trigger_phrases: Optional[set[str]] = Field(description="Set of trigger phrases for this model", default=None)
|
||||
default_settings: Optional[ModelDefaultSettings] = Field(
|
||||
default_settings: Optional[MainModelDefaultSettings | ControlAdapterDefaultSettings] = Field(
|
||||
description="Default settings for this model", default=None
|
||||
)
|
||||
|
||||
@ -79,6 +84,7 @@ class ModelRecordChanges(BaseModelExcludeNull):
|
||||
description="The prediction type of the model.", default=None
|
||||
)
|
||||
upcast_attention: Optional[bool] = Field(description="Whether to upcast attention.", default=None)
|
||||
config_path: Optional[str] = Field(description="Path to config file for model", default=None)
|
||||
|
||||
|
||||
class ModelRecordServiceBase(ABC):
|
||||
@ -129,6 +135,17 @@ class ModelRecordServiceBase(ABC):
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_model_by_hash(self, hash: str) -> AnyModelConfig:
|
||||
"""
|
||||
Retrieve the configuration for the indicated model.
|
||||
|
||||
:param hash: Hash of model config to be fetched.
|
||||
|
||||
Exceptions: UnknownModelException
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def list_models(
|
||||
self, page: int = 0, per_page: int = 10, order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default
|
||||
|
@ -203,6 +203,21 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
|
||||
return model
|
||||
|
||||
def get_model_by_hash(self, hash: str) -> AnyModelConfig:
|
||||
with self._db.lock:
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
SELECT config, strftime('%s',updated_at) FROM models
|
||||
WHERE hash=?;
|
||||
""",
|
||||
(hash,),
|
||||
)
|
||||
rows = self._cursor.fetchone()
|
||||
if not rows:
|
||||
raise UnknownModelException("model not found")
|
||||
model = ModelConfigFactory.make_config(json.loads(rows[0]), timestamp=rows[1])
|
||||
return model
|
||||
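A short, hypothetical illustration of the new hash lookup; the store construction and the hash value are placeholders, not taken from this diff:
# Hypothetical usage of ModelRecordServiceSQL.get_model_by_hash (sketch only).
try:
    config = store.get_model_by_hash("blake3:0123abcd...")  # placeholder hash
    print(config.name, config.base, config.type)
except UnknownModelException:
    print("no model with that hash is registered")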
|
||||
def exists(self, key: str) -> bool:
|
||||
"""
|
||||
Return True if a model with the indicated key exists in the database.
|
||||
@ -227,6 +242,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
base_model: Optional[BaseModelType] = None,
|
||||
model_type: Optional[ModelType] = None,
|
||||
model_format: Optional[ModelFormat] = None,
|
||||
order_by: ModelRecordOrderBy = ModelRecordOrderBy.Default,
|
||||
) -> List[AnyModelConfig]:
|
||||
"""
|
||||
Return models matching name, base and/or type.
|
||||
@ -235,10 +251,21 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
:param base_model: Filter by base model (optional)
|
||||
:param model_type: Filter by type of model (optional)
|
||||
:param model_format: Filter by model format (e.g. "diffusers") (optional)
|
||||
:param order_by: Result order
|
||||
|
||||
If none of the optional filters are passed, will return all
|
||||
models in the database.
|
||||
"""
|
||||
|
||||
assert isinstance(order_by, ModelRecordOrderBy)
|
||||
ordering = {
|
||||
ModelRecordOrderBy.Default: "type, base, name, format",
|
||||
ModelRecordOrderBy.Type: "type",
|
||||
ModelRecordOrderBy.Base: "base",
|
||||
ModelRecordOrderBy.Name: "name",
|
||||
ModelRecordOrderBy.Format: "format",
|
||||
}
|
||||
|
||||
where_clause: list[str] = []
|
||||
bindings: list[str] = []
|
||||
if model_name:
|
||||
@ -257,8 +284,10 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
with self._db.lock:
|
||||
self._cursor.execute(
|
||||
f"""--sql
|
||||
SELECT config, strftime('%s',updated_at) FROM models
|
||||
{where};
|
||||
SELECT config, strftime('%s',updated_at)
|
||||
FROM models
|
||||
{where}
|
||||
ORDER BY {ordering[order_by]} -- using ? to bind doesn't work here for some reason;
|
||||
""",
|
||||
tuple(bindings),
|
||||
)
|
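Illustrative only: how the new model_format filter might be combined with the existing filters (the store instance is an assumption):
# Hypothetical search using the added model_format parameter.
from invokeai.backend.model_manager import ModelFormat, ModelType

diffusers_mains = store.search_by_attr(
    model_type=ModelType.Main,
    model_format=ModelFormat.Diffusers,
)
for cfg in diffusers_mains:
    print(cfg.name, cfg.base.value)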
||||
@ -304,7 +333,7 @@ class ModelRecordServiceSQL(ModelRecordServiceBase):
|
||||
"""Return a paginated summary listing of each model in the database."""
|
||||
assert isinstance(order_by, ModelRecordOrderBy)
|
||||
ordering = {
|
||||
ModelRecordOrderBy.Default: "type, base, format, name",
|
||||
ModelRecordOrderBy.Default: "type, base, name, format",
|
||||
ModelRecordOrderBy.Type: "type",
|
||||
ModelRecordOrderBy.Base: "base",
|
||||
ModelRecordOrderBy.Name: "name",
|
||||
|
@ -151,7 +151,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
|
||||
# TODO: how does this work in a multi-user scenario?
|
||||
current_queue_size = self._get_current_queue_size(queue_id)
|
||||
max_queue_size = self.__invoker.services.configuration.get_config().max_queue_size
|
||||
max_queue_size = self.__invoker.services.configuration.max_queue_size
|
||||
max_new_queue_items = max_queue_size - current_queue_size
|
||||
|
||||
priority = 0
|
||||
|
@ -1,7 +1,7 @@
|
||||
import threading
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
from typing import TYPE_CHECKING, Optional, Union
|
||||
|
||||
from PIL.Image import Image
|
||||
from torch import Tensor
|
||||
@ -13,15 +13,16 @@ from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||
from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
|
||||
from invokeai.app.services.images.images_common import ImageDTO
|
||||
from invokeai.app.services.invocation_services import InvocationServices
|
||||
from invokeai.app.services.model_records.model_records_base import UnknownModelException
|
||||
from invokeai.app.util.step_callback import stable_diffusion_step_callback
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType, ModelFormat, ModelType, SubModelType
|
||||
from invokeai.backend.model_manager.load.load_base import LoadedModel
|
||||
from invokeai.backend.model_manager.metadata.metadata_base import AnyModelRepoMetadata
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
|
||||
|
||||
"""
|
||||
@ -299,22 +300,27 @@ class ConditioningInterface(InvocationContextInterface):
|
||||
|
||||
|
||||
class ModelsInterface(InvocationContextInterface):
|
||||
def exists(self, key: str) -> bool:
|
||||
def exists(self, identifier: Union[str, "ModelIdentifierField"]) -> bool:
|
||||
"""Checks if a model exists.
|
||||
|
||||
Args:
|
||||
key: The key of the model.
|
||||
identifier: The key or ModelField representing the model.
|
||||
|
||||
Returns:
|
||||
True if the model exists, False if not.
|
||||
"""
|
||||
return self._services.model_manager.store.exists(key)
|
||||
if isinstance(identifier, str):
|
||||
return self._services.model_manager.store.exists(identifier)
|
||||
|
||||
def load(self, key: str, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
|
||||
return self._services.model_manager.store.exists(identifier.key)
|
||||
|
||||
def load(
|
||||
self, identifier: Union[str, "ModelIdentifierField"], submodel_type: Optional[SubModelType] = None
|
||||
) -> LoadedModel:
|
||||
"""Loads a model.
|
||||
|
||||
Args:
|
||||
key: The key of the model.
|
||||
identifier: The key or ModelField representing the model.
|
||||
submodel_type: The submodel of the model to get.
|
||||
|
||||
Returns:
|
||||
@ -324,9 +330,13 @@ class ModelsInterface(InvocationContextInterface):
|
||||
# The model manager emits events as it loads the model. It needs the context data to build
|
||||
# the event payloads.
|
||||
|
||||
return self._services.model_manager.load_model_by_key(
|
||||
key=key, submodel_type=submodel_type, context_data=self._data
|
||||
)
|
||||
if isinstance(identifier, str):
|
||||
model = self._services.model_manager.store.get_model(identifier)
|
||||
return self._services.model_manager.load.load_model(model, submodel_type, self._data)
|
||||
else:
|
||||
_submodel_type = submodel_type or identifier.submodel_type
|
||||
model = self._services.model_manager.store.get_model(identifier.key)
|
||||
return self._services.model_manager.load.load_model(model, _submodel_type, self._data)
|
||||
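A sketch of the two identifier forms the reworked context API now accepts inside an invocation; the input field name is illustrative, not taken from this diff:
# Hypothetical invocation body (sketch): both call forms resolve through the same loader.
loaded = context.models.load("0123abcd-key")                 # plain key string
loaded = context.models.load(self.model, SubModelType.UNet)  # ModelIdentifierField from an input
with loaded as raw_model:
    ...  # use the underlying model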
|
||||
def load_by_attrs(
|
||||
self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None
|
||||
@ -343,35 +353,29 @@ class ModelsInterface(InvocationContextInterface):
|
||||
Returns:
|
||||
An object representing the loaded model.
|
||||
"""
|
||||
return self._services.model_manager.load_model_by_attr(
|
||||
model_name=name,
|
||||
base_model=base,
|
||||
model_type=type,
|
||||
submodel=submodel_type,
|
||||
context_data=self._data,
|
||||
)
|
||||
|
||||
def get_config(self, key: str) -> AnyModelConfig:
|
||||
configs = self._services.model_manager.store.search_by_attr(model_name=name, base_model=base, model_type=type)
|
||||
if len(configs) == 0:
|
||||
raise UnknownModelException(f"No model found with name {name}, base {base}, and type {type}")
|
||||
|
||||
if len(configs) > 1:
|
||||
raise ValueError(f"More than one model found with name {name}, base {base}, and type {type}")
|
||||
|
||||
return self._services.model_manager.load.load_model(configs[0], submodel_type, self._data)
|
||||
|
||||
def get_config(self, identifier: Union[str, "ModelIdentifierField"]) -> AnyModelConfig:
|
||||
"""Gets a model's config.
|
||||
|
||||
Args:
|
||||
key: The key of the model.
|
||||
identifier: The key or ModelField representing the model.
|
||||
|
||||
Returns:
|
||||
The model's config.
|
||||
"""
|
||||
return self._services.model_manager.store.get_model(key=key)
|
||||
if isinstance(identifier, str):
|
||||
return self._services.model_manager.store.get_model(identifier)
|
||||
|
||||
def get_metadata(self, key: str) -> Optional[AnyModelRepoMetadata]:
|
||||
"""Gets a model's metadata, if it has any.
|
||||
|
||||
Args:
|
||||
key: The key of the model.
|
||||
|
||||
Returns:
|
||||
The model's metadata, if it has any.
|
||||
"""
|
||||
return self._services.model_manager.store.get_metadata(key=key)
|
||||
return self._services.model_manager.store.get_model(identifier.key)
|
||||
|
||||
def search_by_path(self, path: Path) -> list[AnyModelConfig]:
|
||||
"""Searches for models by path.
|
||||
@ -419,7 +423,7 @@ class ConfigInterface(InvocationContextInterface):
|
||||
The app's config.
|
||||
"""
|
||||
|
||||
return self._services.configuration.get_config()
|
||||
return self._services.configuration
|
||||
|
||||
|
||||
class UtilInterface(InvocationContextInterface):
|
||||
|
@ -4,8 +4,6 @@ from logging import Logger
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.shared.sqlite_migrator.sqlite_migrator_common import Migration
|
||||
|
||||
from .util.migrate_yaml_config_1 import MigrateModelYamlToDb1
|
||||
|
||||
|
||||
class Migration3Callback:
|
||||
def __init__(self, app_config: InvokeAIAppConfig, logger: Logger) -> None:
|
||||
@ -15,7 +13,6 @@ class Migration3Callback:
|
||||
def __call__(self, cursor: sqlite3.Cursor) -> None:
|
||||
self._drop_model_manager_metadata(cursor)
|
||||
self._recreate_model_config(cursor)
|
||||
self._migrate_model_config_records(cursor)
|
||||
|
||||
def _drop_model_manager_metadata(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""Drops the `model_manager_metadata` table."""
|
||||
@ -55,12 +52,6 @@ class Migration3Callback:
|
||||
"""
|
||||
)
|
||||
|
||||
def _migrate_model_config_records(self, cursor: sqlite3.Cursor) -> None:
|
||||
"""After updating the model config table, we repopulate it."""
|
||||
self._logger.info("Migrating model config records from models.yaml to database")
|
||||
model_record_migrator = MigrateModelYamlToDb1(self._app_config, self._logger, cursor)
|
||||
model_record_migrator.migrate()
|
||||
|
||||
|
||||
def build_migration_3(app_config: InvokeAIAppConfig, logger: Logger) -> Migration:
|
||||
"""
|
||||
|
@ -1,163 +0,0 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein
|
||||
"""Migrate from the InvokeAI v2 models.yaml format to the v3 sqlite format."""
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from logging import Logger
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from omegaconf import DictConfig, OmegaConf
|
||||
from pydantic import TypeAdapter
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
UnknownModelException,
|
||||
)
|
||||
from invokeai.backend.model_manager.config import (
|
||||
AnyModelConfig,
|
||||
BaseModelType,
|
||||
ModelConfigFactory,
|
||||
ModelType,
|
||||
)
|
||||
from invokeai.backend.model_manager.hash import ModelHash
|
||||
|
||||
ModelsValidator = TypeAdapter(AnyModelConfig)
|
||||
|
||||
|
||||
class MigrateModelYamlToDb1:
|
||||
"""
|
||||
Migrate the InvokeAI models.yaml format (VERSION 3.0.0) to SQL3 database format (VERSION 3.5.0).
|
||||
|
||||
The class has one externally useful method, migrate(), which scans the
|
||||
current models.yaml file and imports all its entries into invokeai.db.
|
||||
|
||||
Use this way:
|
||||
|
||||
from invokeai.backend.model_manager.migrate_to_db import MigrateModelYamlToDb
|
||||
MigrateModelYamlToDb().migrate()
|
||||
|
||||
"""
|
||||
|
||||
config: InvokeAIAppConfig
|
||||
logger: Logger
|
||||
cursor: sqlite3.Cursor
|
||||
|
||||
def __init__(self, config: InvokeAIAppConfig, logger: Logger, cursor: sqlite3.Cursor = None) -> None:
|
||||
self.config = config
|
||||
self.logger = logger
|
||||
self.cursor = cursor
|
||||
|
||||
def get_yaml(self) -> DictConfig:
|
||||
"""Fetch the models.yaml DictConfig for this installation."""
|
||||
yaml_path = self.config.model_conf_path
|
||||
omegaconf = OmegaConf.load(yaml_path)
|
||||
assert isinstance(omegaconf, DictConfig)
|
||||
return omegaconf
|
||||
|
||||
def migrate(self) -> None:
|
||||
"""Do the migration from models.yaml to invokeai.db."""
|
||||
try:
|
||||
yaml = self.get_yaml()
|
||||
except OSError:
|
||||
return
|
||||
|
||||
for model_key, stanza in yaml.items():
|
||||
if model_key == "__metadata__":
|
||||
assert (
|
||||
stanza["version"] == "3.0.0"
|
||||
), f"This script works on version 3.0.0 yaml files, but your configuration points to a {stanza['version']} version"
|
||||
continue
|
||||
|
||||
base_type, model_type, model_name = str(model_key).split("/")
|
||||
try:
|
||||
hash = ModelHash().hash(self.config.models_path / stanza.path)
|
||||
except OSError:
|
||||
self.logger.warning(f"The model at {stanza.path} is not a valid file or directory. Skipping migration.")
|
||||
continue
|
||||
|
||||
stanza["base"] = BaseModelType(base_type)
|
||||
stanza["type"] = ModelType(model_type)
|
||||
stanza["name"] = model_name
|
||||
stanza["original_hash"] = hash
|
||||
stanza["current_hash"] = hash
|
||||
new_key = hash # deterministic key assignment
|
||||
|
||||
# special case for ip adapters, which need the new `image_encoder_model_id` field
|
||||
if stanza["type"] == ModelType.IPAdapter:
|
||||
try:
|
||||
stanza["image_encoder_model_id"] = self._get_image_encoder_model_id(
|
||||
self.config.models_path / stanza.path
|
||||
)
|
||||
except OSError:
|
||||
self.logger.warning(f"Could not determine image encoder for {stanza.path}. Skipping.")
|
||||
continue
|
||||
|
||||
new_config: AnyModelConfig = ModelsValidator.validate_python(stanza) # type: ignore # see https://github.com/pydantic/pydantic/discussions/7094
|
||||
|
||||
try:
|
||||
if original_record := self._search_by_path(stanza.path):
|
||||
key = original_record.key
|
||||
self.logger.info(f"Updating model {model_name} with information from models.yaml using key {key}")
|
||||
self._update_model(key, new_config)
|
||||
else:
|
||||
self.logger.info(f"Adding model {model_name} with key {new_key}")
|
||||
self._add_model(new_key, new_config)
|
||||
except DuplicateModelException:
|
||||
self.logger.warning(f"Model {model_name} is already in the database")
|
||||
except UnknownModelException:
|
||||
self.logger.warning(f"Model at {stanza.path} could not be found in database")
|
||||
|
||||
def _search_by_path(self, path: Path) -> Optional[AnyModelConfig]:
|
||||
self.cursor.execute(
|
||||
"""--sql
|
||||
SELECT config FROM model_config
|
||||
WHERE path=?;
|
||||
""",
|
||||
(str(path),),
|
||||
)
|
||||
results = [ModelConfigFactory.make_config(json.loads(x[0])) for x in self.cursor.fetchall()]
|
||||
return results[0] if results else None
|
||||
|
||||
def _update_model(self, key: str, config: AnyModelConfig) -> None:
|
||||
record = ModelConfigFactory.make_config(config, key=key) # ensure it is a valid config object
|
||||
json_serialized = record.model_dump_json() # and turn it into a json string.
|
||||
self.cursor.execute(
|
||||
"""--sql
|
||||
UPDATE model_config
|
||||
SET
|
||||
config=?
|
||||
WHERE id=?;
|
||||
""",
|
||||
(json_serialized, key),
|
||||
)
|
||||
if self.cursor.rowcount == 0:
|
||||
raise UnknownModelException("model not found")
|
||||
|
||||
def _add_model(self, key: str, config: AnyModelConfig) -> None:
|
||||
record = ModelConfigFactory.make_config(config, key=key) # ensure it is a valid config object.
|
||||
json_serialized = record.model_dump_json() # and turn it into a json string.
|
||||
try:
|
||||
self.cursor.execute(
|
||||
"""--sql
|
||||
INSERT INTO model_config (
|
||||
id,
|
||||
original_hash,
|
||||
config
|
||||
)
|
||||
VALUES (?,?,?);
|
||||
""",
|
||||
(
|
||||
key,
|
||||
record.hash,
|
||||
json_serialized,
|
||||
),
|
||||
)
|
||||
except sqlite3.IntegrityError as exc:
|
||||
raise DuplicateModelException(f"{record.name}: model is already in database") from exc
|
||||
|
||||
def _get_image_encoder_model_id(self, model_path: Path) -> str:
|
||||
with open(model_path / "image_encoder.txt") as f:
|
||||
encoder = f.read()
|
||||
return encoder.strip()
|
@ -8,3 +8,8 @@ class UrlServiceBase(ABC):
|
||||
def get_image_url(self, image_name: str, thumbnail: bool = False) -> str:
|
||||
"""Gets the URL for an image or thumbnail."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_model_image_url(self, model_key: str) -> str:
|
||||
"""Gets the URL for a model image"""
|
||||
pass
|
||||
|
@ -4,8 +4,9 @@ from .urls_base import UrlServiceBase
|
||||
|
||||
|
||||
class LocalUrlService(UrlServiceBase):
|
||||
def __init__(self, base_url: str = "api/v1"):
|
||||
def __init__(self, base_url: str = "api/v1", base_url_v2: str = "api/v2"):
|
||||
self._base_url = base_url
|
||||
self._base_url_v2 = base_url_v2
|
||||
|
||||
def get_image_url(self, image_name: str, thumbnail: bool = False) -> str:
|
||||
image_basename = os.path.basename(image_name)
|
||||
@ -15,3 +16,6 @@ class LocalUrlService(UrlServiceBase):
|
||||
return f"{self._base_url}/images/i/{image_basename}/thumbnail"
|
||||
|
||||
return f"{self._base_url}/images/i/{image_basename}/full"
|
||||
|
||||
def get_model_image_url(self, model_key: str) -> str:
|
||||
return f"{self._base_url_v2}/models/i/{model_key}/image"
|
||||
|
invokeai/app/util/download_with_progress.py (new file, 51 lines)
@ -0,0 +1,51 @@
|
||||
from pathlib import Path
|
||||
from urllib import request
|
||||
|
||||
from tqdm import tqdm
|
||||
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
|
||||
class ProgressBar:
|
||||
"""Simple progress bar for urllib.request.urlretrieve using tqdm."""
|
||||
|
||||
def __init__(self, model_name: str = "file"):
|
||||
self.pbar = None
|
||||
self.name = model_name
|
||||
|
||||
def __call__(self, block_num: int, block_size: int, total_size: int):
|
||||
if not self.pbar:
|
||||
self.pbar = tqdm(
|
||||
desc=self.name,
|
||||
initial=0,
|
||||
unit="iB",
|
||||
unit_scale=True,
|
||||
unit_divisor=1000,
|
||||
total=total_size,
|
||||
)
|
||||
self.pbar.update(block_size)
|
||||
|
||||
|
||||
def download_with_progress_bar(name: str, url: str, dest_path: Path) -> bool:
|
||||
"""Download a file from a URL to a destination path, with a progress bar.
|
||||
If the file already exists, it will not be downloaded again.
|
||||
|
||||
Exceptions are not caught.
|
||||
|
||||
Args:
|
||||
name (str): Name of the file being downloaded.
|
||||
url (str): URL to download the file from.
|
||||
dest_path (Path): Destination path to save the file to.
|
||||
|
||||
Returns:
|
||||
bool: True if the file was downloaded, False if it already existed.
|
||||
"""
|
||||
if dest_path.exists():
|
||||
return False # already downloaded
|
||||
|
||||
InvokeAILogger.get_logger().info(f"Downloading {name}...")
|
||||
|
||||
dest_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
request.urlretrieve(url, dest_path, ProgressBar(name))
|
||||
|
||||
return True
|
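A hypothetical call, mirroring how the DW-Pose and DepthAnything loaders below use this helper; the URL and destination path are placeholders:
# Hypothetical usage of download_with_progress_bar; returns False when the file already exists.
from pathlib import Path
from invokeai.app.util.download_with_progress import download_with_progress_bar

was_downloaded = download_with_progress_bar(
    "yolox_l.onnx",                                     # display name for the progress bar
    "https://example.com/models/yolox_l.onnx",          # placeholder URL
    Path("models/any/annotators/dwpose/yolox_l.onnx"),  # placeholder destination
)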
invokeai/app/util/suppress_output.py (new file, 24 lines)
@ -0,0 +1,24 @@
|
||||
import io
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
|
||||
class SuppressOutput:
|
||||
"""Context manager to suppress stdout.
|
||||
|
||||
Example:
|
||||
```
|
||||
with SuppressOutput():
|
||||
print("This will not be printed")
|
||||
```
|
||||
"""
|
||||
|
||||
def __enter__(self):
|
||||
# Save the original stdout
|
||||
self._original_stdout = sys.stdout
|
||||
# Redirect stdout to a dummy StringIO object
|
||||
sys.stdout = io.StringIO()
|
||||
|
||||
def __exit__(self, *args: Any, **kwargs: Any):
|
||||
# Restore stdout
|
||||
sys.stdout = self._original_stdout
|
@ -22,7 +22,7 @@ def generate_ti_list(
|
||||
for trigger in extract_ti_triggers_from_prompt(prompt):
|
||||
name_or_key = trigger[1:-1]
|
||||
try:
|
||||
loaded_model = context.models.load(key=name_or_key)
|
||||
loaded_model = context.models.load(name_or_key)
|
||||
model = loaded_model.model
|
||||
assert isinstance(model, TextualInversionModelRaw)
|
||||
assert loaded_model.config.base == base
|
||||
|
@ -1,4 +0,0 @@
|
||||
"""Initialization file for invokeai.backend.embeddings modules."""
|
||||
|
||||
# from .model_patcher import ModelPatcher
|
||||
# __all__ = ["ModelPatcher"]
|
@ -1,12 +0,0 @@
|
||||
"""Base class for LoRA and Textual Inversion models.
|
||||
|
||||
The EmbeddingRaw class is the base class of LoRAModelRaw and TextualInversionModelRaw,
|
||||
and is used for type checking of calls to the model patcher.
|
||||
|
||||
The use of "Raw" here is a historical artifact, and carried forward in
|
||||
order to avoid confusion.
|
||||
"""
|
||||
|
||||
|
||||
class EmbeddingModelRaw:
|
||||
"""Base class for LoRA and Textual Inversion models."""
|
@ -5,21 +5,4 @@ Initialization file for invokeai.backend.image_util methods.
|
||||
from .patchmatch import PatchMatch # noqa: F401
|
||||
from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata # noqa: F401
|
||||
from .seamless import configure_model_padding # noqa: F401
|
||||
from .txt2mask import Txt2Mask # noqa: F401
|
||||
from .util import InitImageResizer, make_grid # noqa: F401
|
||||
|
||||
|
||||
def debug_image(debug_image, debug_text, debug_show=True, debug_result=False, debug_status=False):
|
||||
from PIL import ImageDraw
|
||||
|
||||
if not debug_status:
|
||||
return
|
||||
|
||||
image_copy = debug_image.copy().convert("RGBA")
|
||||
ImageDraw.Draw(image_copy).text((5, 5), debug_text, (255, 0, 0))
|
||||
|
||||
if debug_show:
|
||||
image_copy.show()
|
||||
|
||||
if debug_result:
|
||||
return image_copy
|
||||
|
invokeai/backend/image_util/canny.py (new file, 41 lines)
@ -0,0 +1,41 @@
|
||||
import cv2
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import (
|
||||
cv2_to_pil,
|
||||
normalize_image_channel_count,
|
||||
pil_to_cv2,
|
||||
resize_image_to_resolution,
|
||||
)
|
||||
|
||||
|
||||
def get_canny_edges(
|
||||
image: Image.Image, low_threshold: int, high_threshold: int, detect_resolution: int, image_resolution: int
|
||||
) -> Image.Image:
|
||||
"""Returns the edges of an image using the Canny edge detection algorithm.
|
||||
|
||||
Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).
|
||||
|
||||
Args:
|
||||
image: The input image.
|
||||
low_threshold: The lower threshold for the hysteresis procedure.
|
||||
high_threshold: The upper threshold for the hysteresis procedure.
|
||||
detect_resolution: The resolution of the input image. The image will be resized to this resolution before edge detection.
|
||||
image_resolution: The resolution of the output image. The edges will be resized to this resolution before returning.
|
||||
|
||||
Returns:
|
||||
The Canny edges of the input image.
|
||||
"""
|
||||
|
||||
if image.mode != "RGB":
|
||||
image = image.convert("RGB")
|
||||
|
||||
np_image = pil_to_cv2(image)
|
||||
np_image = normalize_image_channel_count(np_image)
|
||||
np_image = resize_image_to_resolution(np_image, detect_resolution)
|
||||
|
||||
edge_map = cv2.Canny(np_image, low_threshold, high_threshold)
|
||||
edge_map = normalize_image_channel_count(edge_map)
|
||||
edge_map = resize_image_to_resolution(edge_map, image_resolution)
|
||||
|
||||
return cv2_to_pil(edge_map)
|
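Illustrative call to the new Canny helper; the thresholds, resolutions, and file names are placeholder values:
# Hypothetical usage of get_canny_edges (sketch).
from PIL import Image
from invokeai.backend.image_util.canny import get_canny_edges

edges = get_canny_edges(
    Image.open("input.png"),  # placeholder input
    low_threshold=100,
    high_threshold=200,
    detect_resolution=512,
    image_resolution=512,
)
edges.save("edges.png")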
@ -9,13 +9,15 @@ from einops import repeat
|
||||
from PIL import Image
|
||||
from torchvision.transforms import Compose
|
||||
|
||||
from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.download_with_progress import download_with_progress_bar
|
||||
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
|
||||
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
|
||||
from invokeai.backend.util.devices import choose_torch_device
|
||||
from invokeai.backend.util.util import download_with_progress_bar
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
config = get_config()
|
||||
logger = InvokeAILogger.get_logger(config=config)
|
||||
|
||||
DEPTH_ANYTHING_MODELS = {
|
||||
"large": {
|
||||
@ -54,11 +56,15 @@ class DepthAnythingDetector:
|
||||
def __init__(self) -> None:
|
||||
self.model = None
|
||||
self.model_size: Union[Literal["large", "base", "small"], None] = None
|
||||
self.device = choose_torch_device()
|
||||
|
||||
def load_model(self, model_size=Literal["large", "base", "small"]):
|
||||
DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"])
|
||||
if not DEPTH_ANYTHING_MODEL_PATH.exists():
|
||||
download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH)
|
||||
def load_model(self, model_size: Literal["large", "base", "small"] = "small"):
|
||||
DEPTH_ANYTHING_MODEL_PATH = config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"]
|
||||
download_with_progress_bar(
|
||||
pathlib.Path(DEPTH_ANYTHING_MODELS[model_size]["url"]).name,
|
||||
DEPTH_ANYTHING_MODELS[model_size]["url"],
|
||||
DEPTH_ANYTHING_MODEL_PATH,
|
||||
)
|
||||
|
||||
if not self.model or model_size != self.model_size:
|
||||
del self.model
|
||||
@ -71,8 +77,6 @@ class DepthAnythingDetector:
|
||||
self.model = DPT_DINOv2(encoder="vitb", features=128, out_channels=[96, 192, 384, 768])
|
||||
case "large":
|
||||
self.model = DPT_DINOv2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
|
||||
case _:
|
||||
raise TypeError("Not a supported model")
|
||||
|
||||
self.model.load_state_dict(torch.load(DEPTH_ANYTHING_MODEL_PATH.as_posix(), map_location="cpu"))
|
||||
self.model.eval()
|
||||
@ -80,20 +84,20 @@ class DepthAnythingDetector:
|
||||
self.model.to(choose_torch_device())
|
||||
return self.model
|
||||
|
||||
def to(self, device):
|
||||
self.model.to(device)
|
||||
return self
|
||||
def __call__(self, image: Image.Image, resolution: int = 512) -> Image.Image:
|
||||
if not self.model:
|
||||
logger.warn("DepthAnything model was not loaded. Returning original image")
|
||||
return image
|
||||
|
||||
def __call__(self, image, resolution=512, offload=False):
|
||||
image = np.array(image, dtype=np.uint8)
|
||||
image = image[:, :, ::-1] / 255.0
|
||||
np_image = np.array(image, dtype=np.uint8)
|
||||
np_image = np_image[:, :, ::-1] / 255.0
|
||||
|
||||
image_height, image_width = image.shape[:2]
|
||||
image = transform({"image": image})["image"]
|
||||
image = torch.from_numpy(image).unsqueeze(0).to(choose_torch_device())
|
||||
image_height, image_width = np_image.shape[:2]
|
||||
np_image = transform({"image": np_image})["image"]
|
||||
tensor_image = torch.from_numpy(np_image).unsqueeze(0).to(choose_torch_device())
|
||||
|
||||
with torch.no_grad():
|
||||
depth = self.model(image)
|
||||
depth = self.model(tensor_image)
|
||||
depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0]
|
||||
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
|
||||
|
||||
@ -103,7 +107,4 @@ class DepthAnythingDetector:
|
||||
new_height = int(image_height * (resolution / image_width))
|
||||
depth_map = depth_map.resize((resolution, new_height))
|
||||
|
||||
if offload:
|
||||
del self.model
|
||||
|
||||
return depth_map
|
||||
|
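A minimal sketch of the refactored detector as used above; the import path, model size, and file names are assumptions:
# Hypothetical DepthAnythingDetector usage after the refactor (the offload flag is gone).
from PIL import Image
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector  # assumed module path

detector = DepthAnythingDetector()
detector.load_model("small")
depth_map = detector(Image.open("input.png"), resolution=512)  # placeholder file
depth_map.save("depth.png")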
@ -1,14 +1,13 @@
|
||||
# Code from the original DWPose Implementation: https://github.com/IDEA-Research/DWPose
|
||||
# Modified pathing to suit Invoke
|
||||
|
||||
import pathlib
|
||||
|
||||
import numpy as np
|
||||
import onnxruntime as ort
|
||||
|
||||
from invokeai.app.services.config.config_default import InvokeAIAppConfig
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.util.download_with_progress import download_with_progress_bar
|
||||
from invokeai.backend.util.devices import choose_torch_device
|
||||
from invokeai.backend.util.util import download_with_progress_bar
|
||||
|
||||
from .onnxdet import inference_detector
|
||||
from .onnxpose import inference_pose
|
||||
@ -24,7 +23,7 @@ DWPOSE_MODELS = {
|
||||
},
|
||||
}
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
config = get_config()
|
||||
|
||||
|
||||
class Wholebody:
|
||||
@ -33,13 +32,13 @@ class Wholebody:
|
||||
|
||||
providers = ["CUDAExecutionProvider"] if device == "cuda" else ["CPUExecutionProvider"]
|
||||
|
||||
DET_MODEL_PATH = pathlib.Path(config.models_path / DWPOSE_MODELS["yolox_l.onnx"]["local"])
|
||||
if not DET_MODEL_PATH.exists():
|
||||
download_with_progress_bar(DWPOSE_MODELS["yolox_l.onnx"]["url"], DET_MODEL_PATH)
|
||||
DET_MODEL_PATH = config.models_path / DWPOSE_MODELS["yolox_l.onnx"]["local"]
|
||||
download_with_progress_bar("yolox_l.onnx", DWPOSE_MODELS["yolox_l.onnx"]["url"], DET_MODEL_PATH)
|
||||
|
||||
POSE_MODEL_PATH = pathlib.Path(config.models_path / DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["local"])
|
||||
if not POSE_MODEL_PATH.exists():
|
||||
download_with_progress_bar(DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["url"], POSE_MODEL_PATH)
|
||||
POSE_MODEL_PATH = config.models_path / DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["local"]
|
||||
download_with_progress_bar(
|
||||
"dw-ll_ucoco_384.onnx", DWPOSE_MODELS["dw-ll_ucoco_384.onnx"]["url"], POSE_MODEL_PATH
|
||||
)
|
||||
|
||||
onnx_det = DET_MODEL_PATH
|
||||
onnx_pose = POSE_MODEL_PATH
|
||||
|
invokeai/backend/image_util/hed.py (new file, 142 lines)
@ -0,0 +1,142 @@
|
||||
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
from einops import rearrange
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import (
|
||||
non_maximum_suppression,
|
||||
normalize_image_channel_count,
|
||||
np_to_pil,
|
||||
pil_to_np,
|
||||
resize_image_to_resolution,
|
||||
safe_step,
|
||||
)
|
||||
|
||||
|
||||
class DoubleConvBlock(torch.nn.Module):
|
||||
def __init__(self, input_channel, output_channel, layer_number):
|
||||
super().__init__()
|
||||
self.convs = torch.nn.Sequential()
|
||||
self.convs.append(
|
||||
torch.nn.Conv2d(
|
||||
in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1
|
||||
)
|
||||
)
|
||||
for _i in range(1, layer_number):
|
||||
self.convs.append(
|
||||
torch.nn.Conv2d(
|
||||
in_channels=output_channel,
|
||||
out_channels=output_channel,
|
||||
kernel_size=(3, 3),
|
||||
stride=(1, 1),
|
||||
padding=1,
|
||||
)
|
||||
)
|
||||
self.projection = torch.nn.Conv2d(
|
||||
in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0
|
||||
)
|
||||
|
||||
def __call__(self, x, down_sampling=False):
|
||||
h = x
|
||||
if down_sampling:
|
||||
h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
|
||||
for conv in self.convs:
|
||||
h = conv(h)
|
||||
h = torch.nn.functional.relu(h)
|
||||
return h, self.projection(h)
|
||||
|
||||
|
||||
class ControlNetHED_Apache2(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
|
||||
self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
|
||||
self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
|
||||
self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
|
||||
self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
|
||||
self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)
|
||||
|
||||
def __call__(self, x):
|
||||
h = x - self.norm
|
||||
h, projection1 = self.block1(h)
|
||||
h, projection2 = self.block2(h, down_sampling=True)
|
||||
h, projection3 = self.block3(h, down_sampling=True)
|
||||
h, projection4 = self.block4(h, down_sampling=True)
|
||||
h, projection5 = self.block5(h, down_sampling=True)
|
||||
return projection1, projection2, projection3, projection4, projection5
|
||||
|
||||
|
||||
class HEDProcessor:
|
||||
"""Holistically-Nested Edge Detection.
|
||||
|
||||
On instantiation, loads the HED model from the HuggingFace Hub.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
model_path = hf_hub_download("lllyasviel/Annotators", "ControlNetHED.pth")
|
||||
self.network = ControlNetHED_Apache2()
|
||||
self.network.load_state_dict(torch.load(model_path, map_location="cpu"))
|
||||
self.network.float().eval()
|
||||
|
||||
def to(self, device: torch.device):
|
||||
self.network.to(device)
|
||||
return self
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_image: Image.Image,
|
||||
detect_resolution: int = 512,
|
||||
image_resolution: int = 512,
|
||||
safe: bool = False,
|
||||
scribble: bool = False,
|
||||
) -> Image.Image:
|
||||
"""Processes an image and returns the detected edges.
|
||||
|
||||
Args:
|
||||
input_image: The input image.
|
||||
detect_resolution: The resolution to fit the image to before edge detection.
|
||||
image_resolution: The resolution to fit the edges to before returning.
|
||||
safe: Whether to apply safe step to the detected edges.
|
||||
scribble: Whether to apply non-maximum suppression and Gaussian blur to the detected edges.
|
||||
|
||||
Returns:
|
||||
The detected edges.
|
||||
"""
|
||||
device = next(iter(self.network.parameters())).device
|
||||
np_image = pil_to_np(input_image)
|
||||
np_image = normalize_image_channel_count(np_image)
|
||||
np_image = resize_image_to_resolution(np_image, detect_resolution)
|
||||
|
||||
assert np_image.ndim == 3
|
||||
height, width, _channels = np_image.shape
|
||||
with torch.no_grad():
|
||||
image_hed = torch.from_numpy(np_image.copy()).float().to(device)
|
||||
image_hed = rearrange(image_hed, "h w c -> 1 c h w")
|
||||
edges = self.network(image_hed)
|
||||
edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
|
||||
edges = [cv2.resize(e, (width, height), interpolation=cv2.INTER_LINEAR) for e in edges]
|
||||
edges = np.stack(edges, axis=2)
|
||||
edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
|
||||
if safe:
|
||||
edge = safe_step(edge)
|
||||
edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
|
||||
|
||||
detected_map = edge
|
||||
detected_map = normalize_image_channel_count(detected_map)
|
||||
|
||||
img = resize_image_to_resolution(np_image, image_resolution)
|
||||
height, width, _channels = img.shape
|
||||
|
||||
detected_map = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)
|
||||
|
||||
if scribble:
|
||||
detected_map = non_maximum_suppression(detected_map, 127, 3.0)
|
||||
detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
|
||||
detected_map[detected_map > 4] = 255
|
||||
detected_map[detected_map < 255] = 0
|
||||
|
||||
return np_to_pil(detected_map)
|
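Illustrative use of the new HEDProcessor; the device choice and file names are placeholders:
# Hypothetical HEDProcessor usage (sketch). The weights are pulled from the
# HuggingFace Hub on instantiation, as in the class above.
import torch
from PIL import Image
from invokeai.backend.image_util.hed import HEDProcessor

processor = HEDProcessor().to(torch.device("cpu"))
edges = processor.run(
    Image.open("input.png"),  # placeholder input
    detect_resolution=512,
    image_resolution=512,
    scribble=False,
)
edges.save("hed_edges.png")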
@ -10,9 +10,9 @@ from imwatermark import WatermarkEncoder
|
||||
from PIL import Image
|
||||
|
||||
import invokeai.backend.util.logging as logger
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
|
||||
config = InvokeAIAppConfig.get_config()
|
||||
config = get_config()
|
||||
|
||||
|
||||
class InvisibleWatermark:
|
||||
@ -20,14 +20,8 @@ class InvisibleWatermark:
|
||||
Wrapper around InvisibleWatermark module.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def invisible_watermark_available(cls) -> bool:
|
||||
return config.invisible_watermark
|
||||
|
||||
@classmethod
|
||||
def add_watermark(cls, image: Image.Image, watermark_text: str) -> Image.Image:
|
||||
if not cls.invisible_watermark_available():
|
||||
return image
|
||||
logger.debug(f'Applying invisible watermark "{watermark_text}"')
|
||||
bgr = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)
|
||||
encoder = WatermarkEncoder()
|
||||
|
@@ -1,46 +0,0 @@
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Development Team

"""Very simple functions to fetch and print metadata from InvokeAI-generated images."""

import json
import sys
from pathlib import Path
from typing import Any, Dict

from PIL import Image


def get_invokeai_metadata(image_path: Path) -> Dict[str, Any]:
    """
    Retrieve "invokeai_metadata" field from png image.

    :param image_path: Path to the image to read metadata from.
    May raise:
      OSError -- image path not found
      KeyError -- image doesn't contain the metadata field
    """
    image: Image = Image.open(image_path)
    return json.loads(image.text["invokeai_metadata"])


def print_invokeai_metadata(image_path: Path):
    """Pretty-print the metadata."""
    try:
        metadata = get_invokeai_metadata(image_path)
        print(f"{image_path}:\n{json.dumps(metadata, sort_keys=True, indent=4)}")
    except OSError:
        print(f"{image_path}:\nNo file found.")
    except KeyError:
        print(f"{image_path}:\nNo metadata found.")
    print()


def main():
    """Run the command-line utility."""
    image_paths = sys.argv[1:]
    if not image_paths:
        print(f"Usage: {Path(sys.argv[0]).name} image1 image2 image3 ...")
        print("\nPretty-print InvokeAI image metadata from the listed png files.")
        sys.exit(-1)
    for img in image_paths:
        print_invokeai_metadata(img)
@@ -6,7 +6,7 @@ import torch
from PIL import Image

import invokeai.backend.util.logging as logger
from invokeai.app.services.config import get_invokeai_config
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.util.devices import choose_torch_device


@@ -29,7 +29,7 @@ def load_jit_model(url_or_path, device):
class LaMA:
    def __call__(self, input_image: Image.Image, *args: Any, **kwds: Any) -> Any:
        device = choose_torch_device()
        model_location = get_invokeai_config().models_path / "core/misc/lama/lama.pt"
        model_location = get_config().models_path / "core/misc/lama/lama.pt"
        model = load_jit_model(model_location, device)

        image = np.asarray(input_image.convert("RGB"))
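A minimal usage sketch of the LaMA inpainting wrapper touched in the hunk above. The callable interface is visible in the diff; the import path, input file name, and the exact form of the returned result are assumptions.

# Illustrative only; not part of the diff.
from PIL import Image
from invokeai.backend.image_util.lama import LaMA  # assumed path

infilled = LaMA()(Image.open("image_to_infill.png"))  # hypothetical input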
158  invokeai/backend/image_util/lineart.py  Normal file
@@ -0,0 +1,158 @@
|
||||
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from einops import rearrange
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import (
|
||||
normalize_image_channel_count,
|
||||
np_to_pil,
|
||||
pil_to_np,
|
||||
resize_image_to_resolution,
|
||||
)
|
||||
|
||||
|
||||
class ResidualBlock(nn.Module):
|
||||
def __init__(self, in_features):
|
||||
super(ResidualBlock, self).__init__()
|
||||
|
||||
conv_block = [
|
||||
nn.ReflectionPad2d(1),
|
||||
nn.Conv2d(in_features, in_features, 3),
|
||||
nn.InstanceNorm2d(in_features),
|
||||
nn.ReLU(inplace=True),
|
||||
nn.ReflectionPad2d(1),
|
||||
nn.Conv2d(in_features, in_features, 3),
|
||||
nn.InstanceNorm2d(in_features),
|
||||
]
|
||||
|
||||
self.conv_block = nn.Sequential(*conv_block)
|
||||
|
||||
def forward(self, x):
|
||||
return x + self.conv_block(x)
|
||||
|
||||
|
||||
class Generator(nn.Module):
|
||||
def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True):
|
||||
super(Generator, self).__init__()
|
||||
|
||||
# Initial convolution block
|
||||
model0 = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, 7), nn.InstanceNorm2d(64), nn.ReLU(inplace=True)]
|
||||
self.model0 = nn.Sequential(*model0)
|
||||
|
||||
# Downsampling
|
||||
model1 = []
|
||||
in_features = 64
|
||||
out_features = in_features * 2
|
||||
for _ in range(2):
|
||||
model1 += [
|
||||
nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
|
||||
nn.InstanceNorm2d(out_features),
|
||||
nn.ReLU(inplace=True),
|
||||
]
|
||||
in_features = out_features
|
||||
out_features = in_features * 2
|
||||
self.model1 = nn.Sequential(*model1)
|
||||
|
||||
model2 = []
|
||||
# Residual blocks
|
||||
for _ in range(n_residual_blocks):
|
||||
model2 += [ResidualBlock(in_features)]
|
||||
self.model2 = nn.Sequential(*model2)
|
||||
|
||||
# Upsampling
|
||||
model3 = []
|
||||
out_features = in_features // 2
|
||||
for _ in range(2):
|
||||
model3 += [
|
||||
nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
|
||||
nn.InstanceNorm2d(out_features),
|
||||
nn.ReLU(inplace=True),
|
||||
]
|
||||
in_features = out_features
|
||||
out_features = in_features // 2
|
||||
self.model3 = nn.Sequential(*model3)
|
||||
|
||||
# Output layer
|
||||
model4 = [nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, 7)]
|
||||
if sigmoid:
|
||||
model4 += [nn.Sigmoid()]
|
||||
|
||||
self.model4 = nn.Sequential(*model4)
|
||||
|
||||
def forward(self, x, cond=None):
|
||||
out = self.model0(x)
|
||||
out = self.model1(out)
|
||||
out = self.model2(out)
|
||||
out = self.model3(out)
|
||||
out = self.model4(out)
|
||||
|
||||
return out
|
||||
|
||||
|
||||
class LineartProcessor:
|
||||
"""Processor for lineart detection."""
|
||||
|
||||
def __init__(self):
|
||||
model_path = hf_hub_download("lllyasviel/Annotators", "sk_model.pth")
|
||||
self.model = Generator(3, 1, 3)
|
||||
self.model.load_state_dict(torch.load(model_path, map_location=torch.device("cpu")))
|
||||
self.model.eval()
|
||||
|
||||
coarse_model_path = hf_hub_download("lllyasviel/Annotators", "sk_model2.pth")
|
||||
self.model_coarse = Generator(3, 1, 3)
|
||||
self.model_coarse.load_state_dict(torch.load(coarse_model_path, map_location=torch.device("cpu")))
|
||||
self.model_coarse.eval()
|
||||
|
||||
def to(self, device: torch.device):
|
||||
self.model.to(device)
|
||||
self.model_coarse.to(device)
|
||||
return self
|
||||
|
||||
def run(
|
||||
self, input_image: Image.Image, coarse: bool = False, detect_resolution: int = 512, image_resolution: int = 512
|
||||
) -> Image.Image:
|
||||
"""Processes an image to detect lineart.
|
||||
|
||||
Args:
|
||||
input_image: The input image.
|
||||
coarse: Whether to use the coarse model.
|
||||
detect_resolution: The resolution to fit the image to before edge detection.
|
||||
image_resolution: The resolution of the output image.
|
||||
|
||||
Returns:
|
||||
The detected lineart.
|
||||
"""
|
||||
device = next(iter(self.model.parameters())).device
|
||||
|
||||
np_image = pil_to_np(input_image)
|
||||
np_image = normalize_image_channel_count(np_image)
|
||||
np_image = resize_image_to_resolution(np_image, detect_resolution)
|
||||
|
||||
model = self.model_coarse if coarse else self.model
|
||||
assert np_image.ndim == 3
|
||||
image = np_image
|
||||
with torch.no_grad():
|
||||
image = torch.from_numpy(image).float().to(device)
|
||||
image = image / 255.0
|
||||
image = rearrange(image, "h w c -> 1 c h w")
|
||||
line = model(image)[0][0]
|
||||
|
||||
line = line.cpu().numpy()
|
||||
line = (line * 255.0).clip(0, 255).astype(np.uint8)
|
||||
|
||||
detected_map = line
|
||||
|
||||
detected_map = normalize_image_channel_count(detected_map)
|
||||
|
||||
img = resize_image_to_resolution(np_image, image_resolution)
|
||||
H, W, C = img.shape
|
||||
|
||||
detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
|
||||
detected_map = 255 - detected_map
|
||||
|
||||
return np_to_pil(detected_map)
|
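A minimal usage sketch for the new LineartProcessor added above. The class, its to()/run() signatures, and the module path all appear in this diff; the input/output file names are placeholders.

# Illustrative only; not part of the diff.
import torch
from PIL import Image
from invokeai.backend.image_util.lineart import LineartProcessor

processor = LineartProcessor().to(torch.device("cpu"))
lineart = processor.run(
    Image.open("photo.png"),  # hypothetical input
    coarse=False,             # set True to use the sk_model2 "coarse" weights
    detect_resolution=512,
    image_resolution=512,
)
lineart.save("lineart.png")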
203  invokeai/backend/image_util/lineart_anime.py  Normal file
@@ -0,0 +1,203 @@
|
||||
"""Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license)."""
|
||||
|
||||
import functools
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from einops import rearrange
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import (
|
||||
normalize_image_channel_count,
|
||||
np_to_pil,
|
||||
pil_to_np,
|
||||
resize_image_to_resolution,
|
||||
)
|
||||
|
||||
|
||||
class UnetGenerator(nn.Module):
|
||||
"""Create a Unet-based generator"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
input_nc: int,
|
||||
output_nc: int,
|
||||
num_downs: int,
|
||||
ngf: int = 64,
|
||||
norm_layer=nn.BatchNorm2d,
|
||||
use_dropout: bool = False,
|
||||
):
|
||||
"""Construct a Unet generator
|
||||
Parameters:
|
||||
input_nc (int) -- the number of channels in input images
|
||||
output_nc (int) -- the number of channels in output images
|
||||
num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7,
|
||||
image of size 128x128 will become of size 1x1 # at the bottleneck
|
||||
ngf (int) -- the number of filters in the last conv layer
|
||||
norm_layer -- normalization layer
|
||||
We construct the U-Net from the innermost layer to the outermost layer.
|
||||
It is a recursive process.
|
||||
"""
|
||||
super(UnetGenerator, self).__init__()
|
||||
# construct unet structure
|
||||
unet_block = UnetSkipConnectionBlock(
|
||||
ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True
|
||||
) # add the innermost layer
|
||||
for _ in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
|
||||
unet_block = UnetSkipConnectionBlock(
|
||||
ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout
|
||||
)
|
||||
# gradually reduce the number of filters from ngf * 8 to ngf
|
||||
unet_block = UnetSkipConnectionBlock(
|
||||
ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer
|
||||
)
|
||||
unet_block = UnetSkipConnectionBlock(
|
||||
ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer
|
||||
)
|
||||
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
|
||||
self.model = UnetSkipConnectionBlock(
|
||||
output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer
|
||||
) # add the outermost layer
|
||||
|
||||
def forward(self, input):
|
||||
"""Standard forward"""
|
||||
return self.model(input)
|
||||
|
||||
|
||||
class UnetSkipConnectionBlock(nn.Module):
|
||||
"""Defines the Unet submodule with skip connection.
|
||||
X -------------------identity----------------------
|
||||
|-- downsampling -- |submodule| -- upsampling --|
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
outer_nc: int,
|
||||
inner_nc: int,
|
||||
input_nc: Optional[int] = None,
|
||||
submodule=None,
|
||||
outermost: bool = False,
|
||||
innermost: bool = False,
|
||||
norm_layer=nn.BatchNorm2d,
|
||||
use_dropout: bool = False,
|
||||
):
|
||||
"""Construct a Unet submodule with skip connections.
|
||||
Parameters:
|
||||
outer_nc (int) -- the number of filters in the outer conv layer
|
||||
inner_nc (int) -- the number of filters in the inner conv layer
|
||||
input_nc (int) -- the number of channels in input images/features
|
||||
submodule (UnetSkipConnectionBlock) -- previously defined submodules
|
||||
outermost (bool) -- if this module is the outermost module
|
||||
innermost (bool) -- if this module is the innermost module
|
||||
norm_layer -- normalization layer
|
||||
use_dropout (bool) -- if use dropout layers.
|
||||
"""
|
||||
super(UnetSkipConnectionBlock, self).__init__()
|
||||
self.outermost = outermost
|
||||
if type(norm_layer) == functools.partial:
|
||||
use_bias = norm_layer.func == nn.InstanceNorm2d
|
||||
else:
|
||||
use_bias = norm_layer == nn.InstanceNorm2d
|
||||
if input_nc is None:
|
||||
input_nc = outer_nc
|
||||
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
|
||||
downrelu = nn.LeakyReLU(0.2, True)
|
||||
downnorm = norm_layer(inner_nc)
|
||||
uprelu = nn.ReLU(True)
|
||||
upnorm = norm_layer(outer_nc)
|
||||
|
||||
if outermost:
|
||||
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
|
||||
down = [downconv]
|
||||
up = [uprelu, upconv, nn.Tanh()]
|
||||
model = down + [submodule] + up
|
||||
elif innermost:
|
||||
upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
|
||||
down = [downrelu, downconv]
|
||||
up = [uprelu, upconv, upnorm]
|
||||
model = down + up
|
||||
else:
|
||||
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
|
||||
down = [downrelu, downconv, downnorm]
|
||||
up = [uprelu, upconv, upnorm]
|
||||
|
||||
if use_dropout:
|
||||
model = down + [submodule] + up + [nn.Dropout(0.5)]
|
||||
else:
|
||||
model = down + [submodule] + up
|
||||
|
||||
self.model = nn.Sequential(*model)
|
||||
|
||||
def forward(self, x):
|
||||
if self.outermost:
|
||||
return self.model(x)
|
||||
else: # add skip connections
|
||||
return torch.cat([x, self.model(x)], 1)
|
||||
|
||||
|
||||
class LineartAnimeProcessor:
|
||||
"""Processes an image to detect lineart."""
|
||||
|
||||
def __init__(self):
|
||||
model_path = hf_hub_download("lllyasviel/Annotators", "netG.pth")
|
||||
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
|
||||
self.model = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False)
|
||||
ckpt = torch.load(model_path)
|
||||
for key in list(ckpt.keys()):
|
||||
if "module." in key:
|
||||
ckpt[key.replace("module.", "")] = ckpt[key]
|
||||
del ckpt[key]
|
||||
self.model.load_state_dict(ckpt)
|
||||
self.model.eval()
|
||||
|
||||
def to(self, device: torch.device):
|
||||
self.model.to(device)
|
||||
return self
|
||||
|
||||
def run(self, input_image: Image.Image, detect_resolution: int = 512, image_resolution: int = 512) -> Image.Image:
|
||||
"""Processes an image to detect lineart.
|
||||
|
||||
Args:
|
||||
input_image: The input image.
|
||||
detect_resolution: The resolution to use for detection.
|
||||
image_resolution: The resolution to use for the output image.
|
||||
|
||||
Returns:
|
||||
The detected lineart.
|
||||
"""
|
||||
device = next(iter(self.model.parameters())).device
|
||||
np_image = pil_to_np(input_image)
|
||||
|
||||
np_image = normalize_image_channel_count(np_image)
|
||||
np_image = resize_image_to_resolution(np_image, detect_resolution)
|
||||
|
||||
H, W, C = np_image.shape
|
||||
Hn = 256 * int(np.ceil(float(H) / 256.0))
|
||||
Wn = 256 * int(np.ceil(float(W) / 256.0))
|
||||
img = cv2.resize(np_image, (Wn, Hn), interpolation=cv2.INTER_CUBIC)
|
||||
with torch.no_grad():
|
||||
image_feed = torch.from_numpy(img).float().to(device)
|
||||
image_feed = image_feed / 127.5 - 1.0
|
||||
image_feed = rearrange(image_feed, "h w c -> 1 c h w")
|
||||
|
||||
line = self.model(image_feed)[0, 0] * 127.5 + 127.5
|
||||
line = line.cpu().numpy()
|
||||
|
||||
line = cv2.resize(line, (W, H), interpolation=cv2.INTER_CUBIC)
|
||||
line = line.clip(0, 255).astype(np.uint8)
|
||||
|
||||
detected_map = line
|
||||
|
||||
detected_map = normalize_image_channel_count(detected_map)
|
||||
|
||||
img = resize_image_to_resolution(np_image, image_resolution)
|
||||
H, W, C = img.shape
|
||||
|
||||
detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
|
||||
detected_map = 255 - detected_map
|
||||
|
||||
return np_to_pil(detected_map)
|
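One detail worth calling out in LineartAnimeProcessor.run() above: the anime model is fed spatial dimensions rounded up to multiples of 256, and the result is resized back afterwards. A standalone sketch of that rounding step (the helper name is hypothetical, not part of the diff):

# Illustrative only; mirrors the Hn/Wn computation in run() above.
import numpy as np

def round_up_to_multiple_of_256(h: int, w: int) -> tuple:
    # e.g. (512, 680) -> (512, 768); the model output is later resized back to (h, w)
    return 256 * int(np.ceil(h / 256.0)), 256 * int(np.ceil(w / 256.0))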
@@ -8,9 +8,7 @@ be suppressed or deferred
import numpy as np

import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig

config = InvokeAIAppConfig.get_config()
from invokeai.app.services.config.config_default import get_config


class PatchMatch:
@@ -28,7 +26,7 @@ class PatchMatch:
    def _load_patch_match(self):
        if self.tried_load:
            return
        if config.try_patchmatch:
        if get_config().patchmatch:
            from patchmatch import patch_match as pm

            if pm.patchmatch_available:
@@ -4,16 +4,18 @@ wraps the safety_checker model. It respects the global "nsfw_checker"
configuration variable, that allows the checker to be suppressed.
"""

from pathlib import Path

import numpy as np
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from PIL import Image
from transformers import AutoFeatureExtractor

import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.silence_warnings import SilenceWarnings

config = InvokeAIAppConfig.get_config()

CHECKER_PATH = "core/convert/stable-diffusion-safety-checker"


@@ -31,30 +33,24 @@ class SafetyChecker:
        if cls.tried_load:
            return

        if config.nsfw_checker:
            try:
                from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
                from transformers import AutoFeatureExtractor

                cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(config.models_path / CHECKER_PATH)
                cls.feature_extractor = AutoFeatureExtractor.from_pretrained(config.models_path / CHECKER_PATH)
                logger.info("NSFW checker initialized")
            except Exception as e:
                logger.warning(f"Could not load NSFW checker: {str(e)}")
        else:
            logger.info("NSFW checker loading disabled")
        try:
            cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(get_config().models_path / CHECKER_PATH)
            cls.feature_extractor = AutoFeatureExtractor.from_pretrained(get_config().models_path / CHECKER_PATH)
        except Exception as e:
            logger.warning(f"Could not load NSFW checker: {str(e)}")
        cls.tried_load = True

    @classmethod
    def safety_checker_available(cls) -> bool:
        cls._load_safety_checker()
        return cls.safety_checker is not None
        return Path(get_config().models_path, CHECKER_PATH).exists()

    @classmethod
    def has_nsfw_concept(cls, image: Image.Image) -> bool:
        if not cls.safety_checker_available():
        if not cls.safety_checker_available() and cls.tried_load:
            return False
        cls._load_safety_checker()
        if cls.safety_checker is None or cls.feature_extractor is None:
            return False

        device = choose_torch_device()
        features = cls.feature_extractor([image], return_tensors="pt")
        features.to(device)
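A minimal usage sketch of the SafetyChecker class patched above. The classmethod names appear in the diff; the import path and file name are assumptions. has_nsfw_concept() lazily loads the checker and returns False when the checker cannot be loaded.

# Illustrative only; not part of the diff.
from PIL import Image
from invokeai.backend.image_util.safety_checker import SafetyChecker  # assumed path

if SafetyChecker.has_nsfw_concept(Image.open("result.png")):
    print("Image flagged by the NSFW checker")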
@@ -1,115 +0,0 @@
"""Makes available the Txt2Mask class, which assists in the automatic
assignment of masks via text prompt using clipseg.

Here is typical usage:

    from invokeai.backend.image_util.txt2mask import Txt2Mask, SegmentedGrayscale
    from PIL import Image

    txt2mask = Txt2Mask(self.device)
    segmented = txt2mask.segment(Image.open('/path/to/img.png'),'a bagel')

    # this will return a grayscale Image of the segmented data
    grayscale = segmented.to_grayscale()

    # this will return a semi-transparent image in which the
    # selected object(s) are opaque and the rest is at various
    # levels of transparency
    transparent = segmented.to_transparent()

    # this will return a masked image suitable for use in inpainting:
    mask = segmented.to_mask(threshold=0.5)

The threshold used in the call to to_mask() selects pixels for use in
the mask that exceed the indicated confidence threshold. Values range
from 0.0 to 1.0. The higher the threshold, the more confident the
algorithm is. In limited testing, I have found that values around 0.5
work fine.
"""

import numpy as np
import torch
from PIL import Image, ImageOps
from transformers import AutoProcessor, CLIPSegForImageSegmentation

import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig

CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
CLIPSEG_SIZE = 352
config = InvokeAIAppConfig.get_config()


class SegmentedGrayscale(object):
    def __init__(self, image: Image.Image, heatmap: torch.Tensor):
        self.heatmap = heatmap
        self.image = image

    def to_grayscale(self, invert: bool = False) -> Image.Image:
        return self._rescale(Image.fromarray(np.uint8(255 - self.heatmap * 255 if invert else self.heatmap * 255)))

    def to_mask(self, threshold: float = 0.5) -> Image.Image:
        discrete_heatmap = self.heatmap.lt(threshold).int()
        return self._rescale(Image.fromarray(np.uint8(discrete_heatmap * 255), mode="L"))

    def to_transparent(self, invert: bool = False) -> Image.Image:
        transparent_image = self.image.copy()
        # For img2img, we want the selected regions to be transparent,
        # but to_grayscale() returns the opposite. Thus invert.
        gs = self.to_grayscale(not invert)
        transparent_image.putalpha(gs)
        return transparent_image

    # unscales and uncrops the 352x352 heatmap so that it matches the image again
    def _rescale(self, heatmap: Image.Image) -> Image.Image:
        size = self.image.width if (self.image.width > self.image.height) else self.image.height
        resized_image = heatmap.resize((size, size), resample=Image.Resampling.LANCZOS)
        return resized_image.crop((0, 0, self.image.width, self.image.height))


class Txt2Mask(object):
    """
    Create new Txt2Mask object. The optional device argument can be one of
    'cuda', 'mps' or 'cpu'.
    """

    def __init__(self, device="cpu", refined=False):
        logger.info("Initializing clipseg model for text to mask inference")

        # BUG: we are not doing anything with the device option at this time
        self.device = device
        self.processor = AutoProcessor.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir)
        self.model = CLIPSegForImageSegmentation.from_pretrained(CLIPSEG_MODEL, cache_dir=config.cache_dir)

    @torch.no_grad()
    def segment(self, image: Image.Image, prompt: str) -> SegmentedGrayscale:
        """
        Given a prompt string such as "a bagel", tries to identify the object in the
        provided image and returns a SegmentedGrayscale object in which the brighter
        pixels indicate where the object is inferred to be.
        """
        if isinstance(image, str):
            image = Image.open(image).convert("RGB")

        image = ImageOps.exif_transpose(image)
        img = self._scale_and_crop(image)

        inputs = self.processor(text=[prompt], images=[img], padding=True, return_tensors="pt")
        outputs = self.model(**inputs)
        heatmap = torch.sigmoid(outputs.logits)
        return SegmentedGrayscale(image, heatmap)

    def _scale_and_crop(self, image: Image.Image) -> Image.Image:
        scaled_image = Image.new("RGB", (CLIPSEG_SIZE, CLIPSEG_SIZE))
        if image.width > image.height:  # width is constraint
            scale = CLIPSEG_SIZE / image.width
        else:
            scale = CLIPSEG_SIZE / image.height
        scaled_image.paste(
            image.resize(
                (int(scale * image.width), int(scale * image.height)),
                resample=Image.Resampling.LANCZOS,
            ),
            box=(0, 0),
        )
        return scaled_image
@@ -1,5 +1,7 @@
from math import ceil, floor, sqrt

import cv2
import numpy as np
from PIL import Image


@@ -69,3 +71,134 @@ def make_grid(image_list, rows=None, cols=None):
        i = i + 1

    return grid_img


def pil_to_np(image: Image.Image) -> np.ndarray:
    """Converts a PIL image to a numpy array."""
    return np.array(image, dtype=np.uint8)


def np_to_pil(image: np.ndarray) -> Image.Image:
    """Converts a numpy array to a PIL image."""
    return Image.fromarray(image)


def pil_to_cv2(image: Image.Image) -> np.ndarray:
    """Converts a PIL image to a CV2 image."""
    return cv2.cvtColor(np.array(image, dtype=np.uint8), cv2.COLOR_RGB2BGR)


def cv2_to_pil(image: np.ndarray) -> Image.Image:
    """Converts a CV2 image to a PIL image."""
    return Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))


def normalize_image_channel_count(image: np.ndarray) -> np.ndarray:
    """Normalizes an image to have 3 channels.

    If the image has 1 channel, it will be duplicated 3 times.
    If the image has 3 channels, it is returned unchanged.
    If the image has 4 channels, the alpha channel will be used to blend the image with a white background.

    Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).

    Args:
        image: The input image.

    Returns:
        The normalized image.
    """
    assert image.dtype == np.uint8
    if image.ndim == 2:
        image = image[:, :, None]
    assert image.ndim == 3
    _height, _width, channels = image.shape
    assert channels == 1 or channels == 3 or channels == 4
    if channels == 3:
        return image
    if channels == 1:
        return np.concatenate([image, image, image], axis=2)
    if channels == 4:
        color = image[:, :, 0:3].astype(np.float32)
        alpha = image[:, :, 3:4].astype(np.float32) / 255.0
        normalized = color * alpha + 255.0 * (1.0 - alpha)
        normalized = normalized.clip(0, 255).astype(np.uint8)
        return normalized

    raise ValueError("Invalid number of channels.")


def resize_image_to_resolution(input_image: np.ndarray, resolution: int) -> np.ndarray:
    """Resizes an image, fitting it to the given resolution.

    Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).

    Args:
        input_image: The input image.
        resolution: The resolution to fit the image to.

    Returns:
        The resized image.
    """
    h = float(input_image.shape[0])
    w = float(input_image.shape[1])
    scaling_factor = float(resolution) / min(h, w)
    h *= scaling_factor
    w *= scaling_factor
    h = int(np.round(h / 64.0)) * 64
    w = int(np.round(w / 64.0)) * 64
    if scaling_factor > 1:
        return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_LANCZOS4)
    else:
        return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_AREA)


def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
    """
    Apply non-maximum suppression to an image.

    This function is adapted from https://github.com/lllyasviel/ControlNet.

    Args:
        image: The input image.
        threshold: The threshold value for the suppression. Pixels with values greater than this will be set to 255.
        sigma: The standard deviation for the Gaussian blur applied to the image.

    Returns:
        The image after non-maximum suppression.
    """

    image = cv2.GaussianBlur(image.astype(np.float32), (0, 0), sigma)

    filter_1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
    filter_2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
    filter_3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
    filter_4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)

    y = np.zeros_like(image)

    for f in [filter_1, filter_2, filter_3, filter_4]:
        np.putmask(y, cv2.dilate(image, kernel=f) == image, image)

    z = np.zeros_like(y, dtype=np.uint8)
    z[y > threshold] = 255
    return z


def safe_step(x: np.ndarray, step: int = 2) -> np.ndarray:
    """Apply the safe step operation to an array.

    I don't fully understand the purpose of this function, but it appears to be normalizing/quantizing the array.

    Adapted from https://github.com/huggingface/controlnet_aux (Apache-2.0 license).

    Args:
        x: The input array.
        step: The step value.

    Returns:
        The array after the safe step operation.
    """
    y = x.astype(np.float32) * float(step + 1)
    y = y.astype(np.int32).astype(np.float32) / float(step)
    return y
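An illustrative sketch (not part of the diff) tying the new util helpers together into the preprocessing pipeline that the processors above follow: normalize the channel count, fit the image to a multiple-of-64 resolution, and convert back to PIL. The input file name is a placeholder.

# Illustrative only; uses functions added in the hunk above.
from PIL import Image
from invokeai.backend.image_util.util import (
    normalize_image_channel_count,
    np_to_pil,
    pil_to_np,
    resize_image_to_resolution,
)

np_image = pil_to_np(Image.open("photo.png"))         # HWC uint8 array
np_image = normalize_image_channel_count(np_image)    # force 3 channels
np_image = resize_image_to_resolution(np_image, 512)  # short side ~512, dims rounded to multiples of 64
preview = np_to_pil(np_image)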
@@ -1,41 +0,0 @@
"""
Check that the invokeai_root is correctly configured and exit if not.
"""

import sys

from invokeai.app.services.config import InvokeAIAppConfig


def check_invokeai_root(config: InvokeAIAppConfig):
    try:
        assert config.db_path.parent.exists(), f"{config.db_path.parent} not found"
        assert config.models_path.exists(), f"{config.models_path} not found"
        if not config.ignore_missing_core_models:
            for model in [
                "CLIP-ViT-bigG-14-laion2B-39B-b160k",
                "bert-base-uncased",
                "clip-vit-large-patch14",
                "sd-vae-ft-mse",
                "stable-diffusion-2-clip",
                "stable-diffusion-safety-checker",
            ]:
                path = config.models_path / f"core/convert/{model}"
                assert path.exists(), f"{path} is missing"
    except Exception as e:
        print()
        print(f"An exception has occurred: {str(e)}")
        print("== STARTUP ABORTED ==")
        print("** One or more necessary files is missing from your InvokeAI root directory **")
        print("** Please rerun the configuration script to fix this problem. **")
        print("** From the launcher, selection option [6]. **")
        print(
            '** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **'
        )
        print(
            '** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not installing '
            "these core models will prevent the loading of some or all .safetensors and .ckpt files. However, you can "
            "always come back and install these core models in the future.)"
        )
        input("Press any key to continue...")
        sys.exit(0)
Some files were not shown because too many files have changed in this diff.