Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)

Compare commits: 655 commits, lstein/tes ... psyche/fea
495
README.md
@ -2,21 +2,102 @@
|
||||
|
||||

|
||||
|
||||
# Invoke - Professional Creative AI Tools for Visual Media
|
||||
## To learn more about Invoke, or implement our Business solutions, visit [invoke.com](https://www.invoke.com/about)
|
||||
|
||||
# Invoke - Professional Creative AI Tools for Visual Media
|
||||
|
||||
#### To learn more about Invoke, or implement our Business solutions, visit [invoke.com]
|
||||
|
||||
[![discord badge]][discord link]
|
||||
[![discord badge]][discord link] [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link] [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link] [![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
||||
|
||||
[![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]
|
||||
</div>
|
||||
|
||||
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
|
||||
Invoke is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. Invoke offers an industry leading web-based UI, and serves as the foundation for multiple commercial products.
|
||||
|
||||
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
||||
[Installation and Updates][installation docs] - [Documentation and Tutorials][docs home] - [Bug Reports][github issues] - [Contributing][contributing docs]
|
||||
|
||||
<div align="center">
|
||||
|
||||

|
||||
|
||||
</div>
|
||||
|
||||
## Quick Start
|
||||
|
||||
1. Download and unzip the installer from the bottom of the [latest release][latest release link].
|
||||
2. Run the installer script.
|
||||
|
||||
- **Windows**: Double-click on the `install.bat` script.
|
||||
- **macOS**: Open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press enter.
|
||||
- **Linux**: Run `install.sh`.
|
||||
|
||||
3. When prompted, enter a location for the install and select your GPU type.
|
||||
4. Once the install finishes, find the directory you selected during install. The default location is `C:\Users\Username\invokeai` for Windows or `~/invokeai` for Linux/macOS.
|
||||
5. Run the launcher script (`invoke.bat` for Windows, `invoke.sh` for macOS and Linux) the same way you ran the installer script in step 2.
|
||||
6. Select option 1 to start the application. Once it starts up, open your browser and go to <http://localhost:9090>.
|
||||
7. Open the model manager tab to install a starter model and then you'll be ready to generate.
|
||||
|
||||
More details, including hardware requirements and manual install instructions, are available in the [installation documentation][installation docs].
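For reference, a minimal launch sequence on Linux/macOS might look like this, assuming the default install location from step 4 (on Windows, run `invoke.bat` from the same folder instead):

```sh
# Go to the directory chosen during install (default location shown here)
cd ~/invokeai
# Start the launcher and choose option 1 to start the application
./invoke.sh
# Then open http://localhost:9090 in your browser
```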
|
||||
|
||||
## Troubleshooting, FAQ and Support
|
||||
|
||||
Please review our [FAQ][faq] for solutions to common installation problems and other issues.
|
||||
|
||||
For more help, please join our [Discord][discord link].
|
||||
|
||||
## Features
|
||||
|
||||
Full details on features can be found in [our documentation][features docs].
|
||||
|
||||
### Web Server & UI
|
||||
|
||||
Invoke runs a locally hosted web server & React UI with an industry-leading user experience.
|
||||
|
||||
### Unified Canvas
|
||||
|
||||
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/out-painting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
|
||||
|
||||
### Workflows & Nodes
|
||||
|
||||
Invoke offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
|
||||
|
||||
### Board & Gallery Management
|
||||
|
||||
Invoke features an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any image-based UI element in the application, and rich metadata within the image allows for easy recall of key prompts or settings used in your workflow.
|
||||
|
||||
### Other features
|
||||
|
||||
- Support for both ckpt and diffusers models
|
||||
- SD1.5, SD2.0, and SDXL support
|
||||
- Upscaling Tools
|
||||
- Embedding Manager & Support
|
||||
- Model Manager & Support
|
||||
- Workflow creation & management
|
||||
- Node-Based Architecture
|
||||
|
||||
## Contributing
|
||||
|
||||
Anyone who wishes to contribute to this project - whether documentation, features, bug fixes, code cleanup, testing, or code reviews - is very much encouraged to do so.
|
||||
|
||||
Get started with contributing by reading our [contribution documentation][contributing docs], joining the [#dev-chat] or the GitHub discussion board.
|
||||
|
||||
We hope you enjoy using Invoke as much as we enjoy creating it, and we hope you will elect to become part of our community.
|
||||
|
||||
## Thanks
|
||||
|
||||
Invoke is a combined effort of [passionate and talented people from across the world][contributors]. We thank them for their time, hard work and effort.
|
||||
|
||||
Original portions of the software are Copyright © 2024 by respective contributors.
|
||||
|
||||
[features docs]: https://invoke-ai.github.io/InvokeAI/features/
|
||||
[faq]: https://invoke-ai.github.io/InvokeAI/help/FAQ/
|
||||
[contributors]: https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/
|
||||
[invoke.com]: https://www.invoke.com/about
|
||||
[github issues]: https://github.com/invoke-ai/InvokeAI/issues
|
||||
[docs home]: https://invoke-ai.github.io/InvokeAI
|
||||
[installation docs]: https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/
|
||||
[#dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
|
||||
[contributing docs]: https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/
|
||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||
@ -30,402 +111,6 @@
|
||||
[latest commit to main badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/main?icon=github&color=yellow&label=last%20dev%20commit&cache=900
|
||||
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
|
||||
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases/latest
|
||||
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
|
||||
[translation status link]: https://hosted.weblate.org/engage/invokeai/
|
||||
|
||||
</div>
|
||||
|
||||
InvokeAI is a leading creative engine built to empower professionals
|
||||
and enthusiasts alike. Generate and create stunning visual media using
|
||||
the latest AI-driven technologies. InvokeAI offers an industry leading
|
||||
Web Interface, interactive Command Line Interface, and also serves as
|
||||
the foundation for multiple commercial products.
|
||||
|
||||
**Quick links**: [[How to
|
||||
Install](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)] [<a
|
||||
href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a
|
||||
href="https://invoke-ai.github.io/InvokeAI/">Documentation and
|
||||
Tutorials</a>]
|
||||
[<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>]
|
||||
[<a
|
||||
href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion,
|
||||
Ideas & Q&A</a>]
|
||||
[<a
|
||||
href="https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/">Contributing</a>]
|
||||
|
||||
<div align="center">
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
</div>
|
||||
|
||||
## Table of Contents
|
||||
|
||||
Table of Contents 📝
|
||||
|
||||
**Getting Started**
|
||||
1. 🏁 [Quick Start](#quick-start)
|
||||
3. 🖥️ [Hardware Requirements](#hardware-requirements)
|
||||
|
||||
**More About Invoke**
|
||||
1. 🌟 [Features](#features)
|
||||
2. 📣 [Latest Changes](#latest-changes)
|
||||
3. 🛠️ [Troubleshooting](#troubleshooting)
|
||||
|
||||
**Supporting the Project**
|
||||
1. 🤝 [Contributing](#contributing)
|
||||
2. 👥 [Contributors](#contributors)
|
||||
3. 💕 [Support](#support)
|
||||
|
||||
## Quick Start
|
||||
|
||||
For full installation and upgrade instructions, please see:
|
||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALLATION/)
|
||||
|
||||
If upgrading from version 2.3, please read [Migrating a 2.3 root
|
||||
directory to 3.0](#migrating-to-3) first.
|
||||
|
||||
### Automatic Installer (suggested for 1st time users)
|
||||
|
||||
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
|
||||
|
||||
2. Download the .zip file for your OS (Windows/macOS/Linux).
|
||||
|
||||
3. Unzip the file.
|
||||
|
||||
4. **Windows:** double-click on the `install.bat` script. **macOS:** Open a Terminal window, drag the file `install.sh` from Finder
|
||||
into the Terminal, and press return. **Linux:** run `install.sh`.
|
||||
|
||||
5. You'll be asked to confirm the location of the folder in which
|
||||
to install InvokeAI and its image generation model files. Pick a
|
||||
location with at least 15 GB of free disk space. More if you plan on
|
||||
installing lots of models.
|
||||
|
||||
6. Wait while the installer does its thing. After installing the software,
|
||||
the installer will launch a script that lets you configure InvokeAI and
|
||||
select a set of starting image generation models.
|
||||
|
||||
7. Find the folder that InvokeAI was installed into (it is not the
|
||||
same as the unpacked zip file directory!) The default location of this
|
||||
folder (if you didn't change it in step 5) is `~/invokeai` on
|
||||
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
|
||||
|
||||
8. On Windows systems, double-click on the `invoke.bat` file. On
|
||||
macOS, open a Terminal window, drag `invoke.sh` from the folder into
|
||||
the Terminal, and press return. On Linux, run `invoke.sh`
|
||||
|
||||
9. Press 2 to open the "browser-based UI", press enter/return, wait a
|
||||
minute or two for Stable Diffusion to start up, then open your browser
|
||||
and go to http://localhost:9090.
|
||||
|
||||
10. Type `banana sushi` in the box on the top left and click `Invoke`
|
||||
|
||||
### Command-Line Installation (for developers and users familiar with Terminals)
|
||||
|
||||
You must have Python 3.10 through 3.11 installed on your machine. Earlier or
|
||||
later versions are not supported.
|
||||
Node.js also needs to be installed along with `pnpm` (can be installed with
|
||||
the command `npm install -g pnpm` if needed)
|
||||
|
||||
1. Open a command-line window on your machine. PowerShell is recommended for Windows.
|
||||
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
|
||||
|
||||
```terminal
|
||||
mkdir invokeai
|
||||
```
|
||||
|
||||
3. Create a virtual environment named `.venv` inside this directory and activate it:
|
||||
|
||||
```terminal
|
||||
cd invokeai
|
||||
python -m venv .venv --prompt InvokeAI
|
||||
```
|
||||
|
||||
4. Activate the virtual environment (do it every time you run InvokeAI)
|
||||
|
||||
_For Linux/Mac users:_
|
||||
|
||||
```sh
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
_For Windows users:_
|
||||
|
||||
```ps
|
||||
.venv\Scripts\activate
|
||||
```
|
||||
|
||||
5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
|
||||
|
||||
_For Windows/Linux with an NVIDIA GPU:_
|
||||
|
||||
```terminal
|
||||
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
|
||||
```
|
||||
|
||||
_For Linux with an AMD GPU:_
|
||||
|
||||
```sh
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||
```
|
||||
|
||||
_For non-GPU systems:_
|
||||
```terminal
|
||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
```
|
||||
|
||||
_For Macintoshes, either Intel or M1/M2/M3:_
|
||||
|
||||
```sh
|
||||
pip install InvokeAI --use-pep517
|
||||
```
|
||||
|
||||
6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
|
||||
|
||||
```terminal
|
||||
invokeai-configure --root .
|
||||
```
|
||||
Don't miss the dot at the end!
|
||||
|
||||
7. Launch the web server (do it every time you run InvokeAI):
|
||||
|
||||
```terminal
|
||||
invokeai-web
|
||||
```
|
||||
|
||||
8. Point your browser to http://localhost:9090 to bring up the web interface.
|
||||
|
||||
9. Type `banana sushi` in the box on the top left and click `Invoke`.
|
||||
|
||||
Be sure to activate the virtual environment each time before re-launching InvokeAI,
|
||||
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
|
||||
|
||||
## Detailed Installation Instructions
|
||||
|
||||
This fork is supported across Linux, Windows and Macintosh. Linux
|
||||
users can use either an Nvidia-based card (with CUDA support) or an
|
||||
AMD card (using the ROCm driver). For full installation and upgrade
|
||||
instructions, please see:
|
||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
|
||||
|
||||
<a name="migrating-to-3"></a>
|
||||
### Migrating a v2.3 InvokeAI root directory
|
||||
|
||||
The InvokeAI root directory is where the InvokeAI startup file,
|
||||
installed models, and generated images are stored. It is ordinarily
|
||||
named `invokeai` and located in your home directory. The contents and
|
||||
layout of this directory have changed between versions 2.3 and 3.0 and
|
||||
cannot be used directly.
|
||||
|
||||
We currently recommend that you use the installer to create a new root
|
||||
directory named differently from the 2.3 one, e.g. `invokeai-3` and
|
||||
then use a migration script to copy your 2.3 models into the new
|
||||
location. However, if you choose, you can upgrade this directory in
|
||||
place. This section gives both recipes.
|
||||
|
||||
#### Creating a new root directory and migrating old models
|
||||
|
||||
This is the safer recipe because it leaves your old root directory in
|
||||
place to fall back on.
|
||||
|
||||
1. Follow the instructions above to create and install InvokeAI in a
|
||||
directory that has a different name from the 2.3 invokeai directory.
|
||||
In this example, we will use "invokeai-3"
|
||||
|
||||
2. When you are prompted to select models to install, select a minimal
|
||||
set of models, such as stable-diffusion-v1.5 only.
|
||||
|
||||
3. After installation is complete, launch `invoke.sh` (Linux/Mac) or
|
||||
`invoke.bat` and select option 8 "Open the developers console". This
|
||||
will take you to the command line.
|
||||
|
||||
4. Issue the command `invokeai-migrate3 --from /path/to/v2.3-root --to
|
||||
/path/to/invokeai-3-root`. Provide the correct `--from` and `--to`
|
||||
paths for your v2.3 and v3.0 root directories respectively.
|
||||
|
||||
This will copy and convert your old models from 2.3 format to 3.0
|
||||
format and create a new `models` directory in the 3.0 directory. The
|
||||
old models directory (which contains the models selected at install
|
||||
time) will be renamed `models.orig` and can be deleted once you have
|
||||
confirmed that the migration was successful.
|
||||
|
||||
If you wish, you can pass the 2.3 root directory to both `--from` and
|
||||
`--to` in order to update in place. Warning: this directory will no
|
||||
longer be usable with InvokeAI 2.3.
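A sketch of both invocations, with placeholder paths that you would replace with your actual 2.3 and 3.0 root directories:

```sh
# Copy and convert 2.3-format models into a separate 3.0 root directory
invokeai-migrate3 --from /path/to/v2.3-root --to /path/to/invokeai-3-root

# Or update the 2.3 root directory in place (it will no longer work with InvokeAI 2.3)
invokeai-migrate3 --from /path/to/v2.3-root --to /path/to/v2.3-root
```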
|
||||
|
||||
#### Migrating in place
|
||||
|
||||
For the adventurous, you may do an in-place upgrade from 2.3 to 3.0
|
||||
without touching the command line. **This recipe does not work on
|
||||
Windows platforms due to a bug in the Windows version of the 2.3
|
||||
upgrade script.** See the next section for a Windows recipe.
|
||||
|
||||
##### For Mac and Linux Users:
|
||||
|
||||
1. Launch the InvokeAI launcher script in your current v2.3 root directory.
|
||||
|
||||
2. Select option [9] "Update InvokeAI" to bring up the updater dialog.
|
||||
|
||||
3. Select option [1] to upgrade to the latest release.
|
||||
|
||||
4. Once the upgrade is finished you will be returned to the launcher
|
||||
menu. Select option [6] "Re-run the configure script to fix a broken
|
||||
install or to complete a major upgrade".
|
||||
|
||||
This will run the configure script against the v2.3 directory and
|
||||
update it to the 3.0 format. The following files will be replaced:
|
||||
|
||||
- The invokeai.init file, replaced by invokeai.yaml
|
||||
- The models directory
|
||||
- The configs/models.yaml model index
|
||||
|
||||
The original versions of these files will be saved with the suffix
|
||||
".orig" appended to the end. Once you have confirmed that the upgrade
|
||||
worked, you can safely remove these files. Alternatively you can
|
||||
restore a working v2.3 directory by removing the new files and
|
||||
restoring the ".orig" files' original names.
|
||||
|
||||
##### For Windows Users:
|
||||
|
||||
Windows users can upgrade with the following steps:
|
||||
|
||||
1. Enter the 2.3 root directory you wish to upgrade
|
||||
2. Launch `invoke.sh` or `invoke.bat`
|
||||
3. Select the "Developer's console" option [8]
|
||||
4. Type the following commands
|
||||
|
||||
```
|
||||
pip install "invokeai @ https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v3.0.0" --use-pep517 --upgrade
|
||||
invokeai-configure --root .
|
||||
```
|
||||
(Replace `v3.0.0` with the current release number if this document is out of date).
|
||||
|
||||
The first command will install and upgrade new software to run
|
||||
InvokeAI. The second will prepare the 2.3 directory for use with 3.0.
|
||||
You may now launch the WebUI in the usual way, by selecting option [1]
|
||||
from the launcher script
|
||||
|
||||
#### Migrating Images
|
||||
|
||||
The migration script will migrate your invokeai settings and models,
|
||||
including textual inversion models, LoRAs and merges that you may have
|
||||
installed previously. However it does **not** migrate the generated
|
||||
images stored in your 2.3-format outputs directory. To do this, you
|
||||
need to run an additional step:
|
||||
|
||||
1. From a working InvokeAI 3.0 root directory, start the launcher and
|
||||
enter menu option [8] to open the "developer's console".
|
||||
|
||||
2. At the developer's console command line, type the command:
|
||||
|
||||
```bash
|
||||
invokeai-import-images
|
||||
```
|
||||
|
||||
3. This will lead you through the process of confirming the desired
|
||||
source and destination for the imported images. The images will
|
||||
appear in the gallery board of your choice, and contain the
|
||||
original prompt, model name, and other parameters used to generate
|
||||
the image.
|
||||
|
||||
(Many kudos to **techjedi** for contributing this script.)
|
||||
|
||||
## Hardware Requirements
|
||||
|
||||
InvokeAI is supported across Linux, Windows and macOS. Linux
|
||||
users can use either an Nvidia-based card (with CUDA support) or an
|
||||
AMD card (using the ROCm driver).
|
||||
|
||||
### System
|
||||
|
||||
You will need one of the following:
|
||||
|
||||
- An NVIDIA-based graphics card with 4 GB or more VRAM memory. 6-8 GB
|
||||
of VRAM is highly recommended for rendering using the Stable
|
||||
Diffusion XL models
|
||||
- An Apple computer with an M1 chip.
|
||||
- An AMD-based graphics card with 4GB or more VRAM memory (Linux
|
||||
only), 6-8 GB for XL rendering.
|
||||
|
||||
We do not recommend the GTX 1650 or 1660 series video cards. They are
|
||||
unable to run in half-precision mode and do not have sufficient VRAM
|
||||
to render 512x512 images.
|
||||
|
||||
**Memory** - At least 12 GB Main Memory RAM.
|
||||
|
||||
**Disk** - At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||
|
||||
## Features
|
||||
|
||||
Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/)
|
||||
|
||||
### *Web Server & UI*
|
||||
|
||||
InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server.
|
||||
|
||||
### *Unified Canvas*
|
||||
|
||||
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
|
||||
|
||||
### *Workflows & Nodes*
|
||||
|
||||
InvokeAI offers a fully featured workflow management solution, enabling users to combine the power of node-based workflows with the ease of a UI. This allows for customizable generation pipelines to be developed and shared by users looking to create specific workflows to support their production use-cases.
|
||||
|
||||
### *Board & Gallery Management*
|
||||
|
||||
Invoke AI provides an organized gallery system for easily storing, accessing, and remixing your content in the Invoke workspace. Images can be dragged/dropped onto any image-based UI element in the application, and rich metadata within the image allows for easy recall of key prompts or settings used in your workflow.
|
||||
|
||||
### Other features
|
||||
|
||||
- *Support for both ckpt and diffusers models*
|
||||
- *SD 2.0, 2.1, XL support*
|
||||
- *Upscaling Tools*
|
||||
- *Embedding Manager & Support*
|
||||
- *Model Manager & Support*
|
||||
- *Workflow creation & management*
|
||||
- *Node-Based Architecture*
|
||||
|
||||
|
||||
### Latest Changes
|
||||
|
||||
For our latest changes, view our [Release
|
||||
Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
|
||||
[CHANGELOG](docs/CHANGELOG.md).
|
||||
|
||||
### Troubleshooting / FAQ
|
||||
|
||||
Please check out our **[FAQ](https://invoke-ai.github.io/InvokeAI/help/FAQ/)** to get solutions for common installation
|
||||
problems and other issues. For more help, please join our [Discord][discord link]
|
||||
|
||||
## Contributing
|
||||
|
||||
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
||||
cleanup, testing, or code reviews, is very much encouraged to do so.
|
||||
|
||||
Get started with contributing by reading our [Contribution documentation](https://invoke-ai.github.io/InvokeAI/contributing/CONTRIBUTING/), joining the [#dev-chat](https://discord.com/channels/1020123559063990373/1049495067846524939) or the GitHub discussion board.
|
||||
|
||||
If you are unfamiliar with how
|
||||
to contribute to GitHub projects, we have a new contributor checklist you can follow to get started contributing:
|
||||
[New Contributor Checklist](https://invoke-ai.github.io/InvokeAI/contributing/contribution_guides/newContributorChecklist/).
|
||||
|
||||
We hope you enjoy using our software as much as we enjoy creating it,
|
||||
and we hope that some of those of you who are reading this will elect
|
||||
to become part of our community.
|
||||
|
||||
Welcome to InvokeAI!
|
||||
|
||||
### Contributors
|
||||
|
||||
This fork is a combined effort of various people from across the world.
|
||||
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
|
||||
their time, hard work and effort.
|
||||
|
||||
### Support
|
||||
|
||||
For support, please use this repository's GitHub Issues tracking service, or join the [Discord][discord link].
|
||||
|
||||
Original portions of the software are Copyright (c) 2023 by respective contributors.
|
||||
|
||||
|
@ -117,13 +117,13 @@ Stateless fields do not store their value in the node, so their field instances
|
||||
|
||||
"Custom" fields will always be treated as stateless fields.
|
||||
|
||||
##### Collection and Scalar Fields
|
||||
##### Single and Collection Fields
|
||||
|
||||
Field types have a name and two flags which may identify it as a **collection** or **collection or scalar** field.
|
||||
Field types have a name and a cardinality property which identifies them as a **SINGLE**, **COLLECTION** or **SINGLE_OR_COLLECTION** field.
|
||||
|
||||
If a field is annotated in python as a list, its field type is parsed and flagged as a **collection** type (e.g. `list[int]`).
|
||||
|
||||
If it is annotated as a union of a type and list, the type will be flagged as a **collection or scalar** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
|
||||
- If a field is annotated in python as a singular value or class, its field type is parsed as a **SINGLE** type (e.g. `int`, `ImageField`, `str`).
|
||||
- If a field is annotated in python as a list, its field type is parsed as a **COLLECTION** type (e.g. `list[int]`).
|
||||
- If it is annotated as a union of a type and list, the type will be parsed as a **SINGLE_OR_COLLECTION** type (e.g. `Union[int, list[int]]`). Fields may not be unions of different types (e.g. `Union[int, list[str]]` and `Union[int, str]` are not allowed).
|
||||
|
||||
## Implementation
|
||||
|
||||
@ -173,8 +173,7 @@ Field types are represented as structured objects:
|
||||
```ts
|
||||
type FieldType = {
|
||||
name: string;
|
||||
isCollection: boolean;
|
||||
isCollectionOrScalar: boolean;
|
||||
cardinality: 'SINGLE' | 'COLLECTION' | 'SINGLE_OR_COLLECTION';
|
||||
};
|
||||
```
|
||||
|
||||
@ -186,7 +185,7 @@ There are 4 general cases for field type parsing.
|
||||
|
||||
When a field is annotated as a primitive value (e.g. `int`, `str`, `float`), the field type parsing is fairly straightforward. The field is represented by a simple OpenAPI **schema object**, which has a `type` property.
|
||||
|
||||
We create a field type name from this `type` string (e.g. `string` -> `StringField`).
|
||||
We create a field type name from this `type` string (e.g. `string` -> `StringField`). The cardinality is `"SINGLE"`.
|
||||
|
||||
##### Complex Types
|
||||
|
||||
@ -200,13 +199,13 @@ We need to **dereference** the schema to pull these out. Dereferencing may requi
|
||||
|
||||
When a field is annotated as a list of a single type, the schema object has an `items` property. They may be a schema object or reference object and must be parsed to determine the item type.
|
||||
|
||||
We use the item type for field type name, adding `isCollection: true` to the field type.
|
||||
We use the item type for field type name. The cardinality is `"COLLECTION"`.
|
||||
|
||||
##### Collection or Scalar Types
|
||||
##### Single or Collection Types
|
||||
|
||||
When a field is annotated as a union of a type and list of that type, the schema object has an `anyOf` property, which holds a list of valid types for the union.
|
||||
|
||||
After verifying that the union has two members (a type and list of the same type), we use the type for field type name, adding `isCollectionOrScalar: true` to the field type.
|
||||
After verifying that the union has two members (a type and list of the same type), we use the type for field type name, with cardinality `"SINGLE_OR_COLLECTION"`.
|
||||
|
||||
##### Optional Fields
|
||||
|
||||
|
@ -51,13 +51,11 @@ The settings in this file will override the defaults. You only need
|
||||
to change this file if the default for a particular setting doesn't
|
||||
work for you.
|
||||
|
||||
You'll find an example file next to `invokeai.yaml` that shows the default values.
|
||||
|
||||
Some settings, like [Model Marketplace API Keys], require the YAML
|
||||
to be formatted correctly. Here is a [basic guide to YAML files].
|
||||
|
||||
You can fix a broken `invokeai.yaml` by deleting it and running the
|
||||
configuration script again -- option [6] in the launcher, "Re-run the
|
||||
configure script".
|
||||
|
||||
#### Custom Config File Location
|
||||
|
||||
You can use any config file with the `--config` CLI arg. Pass in the path to the `invokeai.yaml` file you want to use.
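For example, a hypothetical invocation pointing the web server at a custom config file (the path is a placeholder):

```sh
invokeai-web --config /path/to/custom/invokeai.yaml
```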
|
||||
|
@ -4,278 +4,6 @@ title: Training
|
||||
|
||||
# :material-file-document: Training
|
||||
|
||||
# Textual Inversion Training
|
||||
## **Personalizing Text-to-Image Generation**
|
||||
Invoke Training has moved to its own repository, with a dedicated UI for accessing common scripts like Textual Inversion and LoRA training.
|
||||
|
||||
You may personalize the generated images to provide your own styles or objects
|
||||
by training a new LDM checkpoint and introducing a new vocabulary to the fixed
|
||||
model as a (.pt) embeddings file. Alternatively, you may use or train
|
||||
HuggingFace Concepts embeddings files (.bin) from
|
||||
<https://huggingface.co/sd-concepts-library> and its associated
|
||||
notebooks.
|
||||
|
||||
## **Hardware and Software Requirements**
|
||||
|
||||
You will need a GPU to perform training in a reasonable length of
|
||||
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
|
||||
library](../installation/070_INSTALL_XFORMERS.md) to accelerate the
|
||||
training process further. During training, about ~8 GB is temporarily
|
||||
needed in order to store intermediate models, checkpoints and logs.
|
||||
|
||||
## **Preparing for Training**
|
||||
|
||||
To train, prepare a folder that contains 3-5 images that illustrate
|
||||
the object or concept. It is good to provide a variety of examples or
|
||||
poses to avoid overtraining the system. Format these images as PNG
|
||||
(preferred) or JPG. You do not need to resize or crop the images in
|
||||
advance, but for more control you may wish to do so.
|
||||
|
||||
Place the training images in a directory on the machine InvokeAI runs
|
||||
on. We recommend placing them in a subdirectory of the
|
||||
`text-inversion-training-data` folder located in the InvokeAI root
|
||||
directory, ordinarily `~/invokeai` (Linux/Mac), or
|
||||
`C:\Users\your_name\invokeai` (Windows). For example, to create an
|
||||
embedding for the "psychedelic" style, you'd place the training images
|
||||
into the directory
|
||||
`~/invokeai/text-inversion-training-data/psychedelic`.
|
||||
|
||||
## **Launching Training Using the Console Front End**
|
||||
|
||||
InvokeAI 2.3 and higher comes with a text console-based training front
|
||||
end. From within the `invoke.sh`/`invoke.bat` Invoke launcher script,
|
||||
start the training tool by selecting choice (3):
|
||||
|
||||
```sh
|
||||
1 "Generate images with a browser-based interface"
|
||||
2 "Explore InvokeAI nodes using a command-line interface"
|
||||
3 "Textual inversion training"
|
||||
4 "Merge models (diffusers type only)"
|
||||
5 "Download and install models"
|
||||
6 "Change InvokeAI startup options"
|
||||
7 "Re-run the configure script to fix a broken install or to complete a major upgrade"
|
||||
8 "Open the developer console"
|
||||
9 "Update InvokeAI"
|
||||
```
|
||||
|
||||
Alternatively, you can select option (8) to open the developer console. From the command line, with the InvokeAI virtual environment active,
|
||||
you can then launch the front end with the command `invokeai-ti --gui`.
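That is, from the developer console (or any shell with the virtual environment active):

```sh
invokeai-ti --gui
```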
|
||||
|
||||
This will launch a text-based front end that will look like this:
|
||||
|
||||
<figure markdown>
|
||||

|
||||
</figure>
|
||||
|
||||
The interface is keyboard-based. Move from field to field using
|
||||
control-N (^N) to move to the next field and control-P (^P) to the
|
||||
previous one. <Tab> and <shift-TAB> work as well. Once a field is
|
||||
active, use the cursor keys. In a checkbox group, use the up and down
|
||||
cursor keys to move from choice to choice, and <space> to select a
|
||||
choice. In a scrollbar, use the left and right cursor keys to increase
|
||||
and decrease the value of the scroll. In textfields, type the desired
|
||||
values.
|
||||
|
||||
The number of parameters may look intimidating, but in most cases the
|
||||
predefined defaults work fine. The red circled fields in the above
|
||||
illustration are the ones you will adjust most frequently.
|
||||
|
||||
### Model Name
|
||||
|
||||
This will list all the diffusers models that are currently
|
||||
installed. Select the one you wish to use as the basis for your
|
||||
embedding. Be aware that if you use a SD-1.X-based model for your
|
||||
training, you will only be able to use this embedding with other
|
||||
SD-1.X-based models. Similarly, if you train on SD-2.X, you will only
|
||||
be able to use the embeddings with models based on SD-2.X.
|
||||
|
||||
### Trigger Term
|
||||
|
||||
This is the prompt term you will use to trigger the embedding. Type a
|
||||
single word or phrase you wish to use as the trigger, example
|
||||
"psychedelic" (without angle brackets). Within InvokeAI, you will then
|
||||
be able to activate the trigger using the syntax `<psychedelic>`.
|
||||
|
||||
### Initializer
|
||||
|
||||
This is a single character that is used internally during the training
|
||||
process as a placeholder for the trigger term. It defaults to "*" and
|
||||
can usually be left alone.
|
||||
|
||||
### Resume from last saved checkpoint
|
||||
|
||||
As training proceeds, textual inversion will write a series of
|
||||
intermediate files that can be used to resume training from where it
|
||||
was left off in the case of an interruption. This checkbox will be
|
||||
automatically selected if you provide a previously used trigger term
|
||||
and at least one checkpoint file is found on disk.
|
||||
|
||||
Note that as of 20 January 2023, resume does not seem to be working
|
||||
properly due to an issue with the upstream code.
|
||||
|
||||
### Data Training Directory
|
||||
|
||||
This is the location of the images to be used for training. When you
|
||||
select a trigger term like "my-trigger", the frontend will prepopulate
|
||||
this field with `~/invokeai/text-inversion-training-data/my-trigger`,
|
||||
but you can change the path to wherever you want.
|
||||
|
||||
### Output Destination Directory
|
||||
|
||||
This is the location of the logs, checkpoint files, and embedding
|
||||
files created during training. When you select a trigger term like
|
||||
"my-trigger", the frontend will prepopulate this field with
|
||||
`~/invokeai/text-inversion-output/my-trigger`, but you can change the
|
||||
path to wherever you want.
|
||||
|
||||
### Image resolution
|
||||
|
||||
The images in the training directory will be automatically scaled to
|
||||
the value you use here. For best results, you will want to use the
|
||||
same default resolution of the underlying model (512 pixels for
|
||||
SD-1.5, 768 for the larger version of SD-2.1).
|
||||
|
||||
### Center crop images
|
||||
|
||||
If this is selected, your images will be center cropped to make them
|
||||
square before resizing them to the desired resolution. Center cropping
|
||||
can indiscriminately cut off the top of subjects' heads for portrait
|
||||
aspect images, so if you have images like this, you may wish to use a
|
||||
photoeditor to manually crop them to a square aspect ratio.
|
||||
|
||||
### Mixed precision
|
||||
|
||||
Select the floating point precision for the embedding. "no" will
|
||||
result in a full 32-bit precision, "fp16" will provide 16-bit
|
||||
precision, and "bf16" will provide mixed precision (only available
|
||||
when XFormers is used).
|
||||
|
||||
### Max training steps
|
||||
|
||||
How many steps the training will take before the model converges. Most
|
||||
training sets will converge with 2000-3000 steps.
|
||||
|
||||
### Batch size
|
||||
|
||||
This adjusts how many training images are processed simultaneously in
|
||||
each step. Higher values will cause the training process to run more
|
||||
quickly, but use more memory. The default size will run with GPUs with
|
||||
as little as 12 GB.
|
||||
|
||||
### Learning rate
|
||||
|
||||
The rate at which the system adjusts its internal weights during
|
||||
training. Higher values risk overtraining (getting the same image each
|
||||
time), and lower values will take more steps to train a good
|
||||
model. The default of 0.0005 is conservative; you may wish to increase
|
||||
it to 0.005 to speed up training.
|
||||
|
||||
### Scale learning rate by number of GPUs, steps and batch size
|
||||
|
||||
If this is selected (the default) the system will adjust the provided
|
||||
learning rate to improve performance.
|
||||
|
||||
### Use xformers acceleration
|
||||
|
||||
This will activate XFormers memory-efficient attention. You need to
|
||||
have XFormers installed for this to have an effect.
|
||||
|
||||
### Learning rate scheduler
|
||||
|
||||
This adjusts how the learning rate changes over the course of
|
||||
training. The default "constant" means to use a constant learning rate
|
||||
for the entire training session. The other values scale the learning
|
||||
rate according to various formulas.
|
||||
|
||||
Only "constant" is supported by the XFormers library.
|
||||
|
||||
### Gradient accumulation steps
|
||||
|
||||
This is a parameter that allows you to use bigger batch sizes than
|
||||
your GPU's VRAM would ordinarily accommodate, at the cost of some
|
||||
performance.
|
||||
|
||||
### Warmup steps
|
||||
|
||||
If "constant_with_warmup" is selected in the learning rate scheduler,
|
||||
then this provides the number of warmup steps. Warmup steps have a
|
||||
very low learning rate, and are one way of preventing early
|
||||
overtraining.
|
||||
|
||||
## The training run
|
||||
|
||||
Start the training run by advancing to the OK button (bottom right)
|
||||
and pressing <enter>. A series of progress messages will be displayed
|
||||
as the training process proceeds. This may take an hour or two,
|
||||
depending on settings and the speed of your system. Various log and
|
||||
checkpoint files will be written into the output directory (ordinarily
|
||||
`~/invokeai/text-inversion-output/my-model/`)
|
||||
|
||||
At the end of successful training, the system will copy the file
|
||||
`learned_embeds.bin` into the InvokeAI root directory's `embeddings`
|
||||
directory, using a subdirectory named after the trigger token. For
|
||||
example, if the trigger token was `psychedelic`, then look for the
|
||||
embeddings file in
|
||||
`~/invokeai/embeddings/psychedelic/learned_embeds.bin`
|
||||
|
||||
You may now launch InvokeAI and try out a prompt that uses the trigger
|
||||
term. For example `a plate of banana sushi in <psychedelic> style`.
|
||||
|
||||
## **Training with the Command-Line Script**
|
||||
|
||||
Training can also be done using a traditional command-line script. It
|
||||
can be launched from within the "developer's console", or from the
|
||||
command line after activating InvokeAI's virtual environment.
|
||||
|
||||
It accepts a large number of arguments, which can be summarized by
|
||||
passing the `--help` argument:
|
||||
|
||||
```sh
|
||||
invokeai-ti --help
|
||||
```
|
||||
|
||||
Typical usage is shown here:
|
||||
```sh
|
||||
invokeai-ti \
|
||||
--model=stable-diffusion-1.5 \
|
||||
--resolution=512 \
|
||||
--learnable_property=style \
|
||||
--initializer_token='*' \
|
||||
--placeholder_token='<psychedelic>' \
|
||||
--train_data_dir=/home/lstein/invokeai/training-data/psychedelic \
|
||||
--output_dir=/home/lstein/invokeai/text-inversion-training/psychedelic \
|
||||
--scale_lr \
|
||||
--train_batch_size=8 \
|
||||
--gradient_accumulation_steps=4 \
|
||||
--max_train_steps=3000 \
|
||||
--learning_rate=0.0005 \
|
||||
--resume_from_checkpoint=latest \
|
||||
--lr_scheduler=constant \
|
||||
--mixed_precision=fp16 \
|
||||
--only_save_embeds
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`
|
||||
|
||||
Messages like this indicate you trained the embedding on a different base model than the currently selected one.
|
||||
|
||||
For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).
|
||||
|
||||
## Reading
|
||||
|
||||
For more information on textual inversion, please see the following
|
||||
resources:
|
||||
|
||||
* The [textual inversion repository](https://github.com/rinongal/textual_inversion) and
|
||||
associated paper for details and limitations.
|
||||
* [HuggingFace's textual inversion training
|
||||
page](https://huggingface.co/docs/diffusers/training/text_inversion)
|
||||
* [HuggingFace example script
|
||||
documentation](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion)
|
||||
(Note that this script is similar to, but not identical to,
|
||||
`textual_inversion`, and produces embed files that are completely compatible.)
|
||||
|
||||
---
|
||||
|
||||
copyright (c) 2023, Lincoln Stein and the InvokeAI Development Team
|
||||
You can find more by visiting the repo at https://github.com/invoke-ai/invoke-training
|
||||
|
@ -1,8 +1,10 @@
|
||||
# Automatic Install
|
||||
# Automatic Install & Updates
|
||||
|
||||
The installer is used for both new installs and updates.
|
||||
**The same packaged installer file can be used for both new installs and updates.**
|
||||
Using the installer for updates will leave everything you've added since installation, and just update the core libraries used to run Invoke.
|
||||
Simply use the same path you installed to originally.
|
||||
|
||||
Both release and pre-release versions can be installed using it. It also supports install a wheel if needed.
|
||||
Both release and pre-release versions can be installed using the installer. It also supports installing from a wheel if needed.
|
||||
|
||||
Be sure to review the [installation requirements] and ensure your system has everything it needs to install Invoke.
|
||||
|
||||
@ -96,7 +98,7 @@ Updating is exactly the same as installing - download the latest installer, choo
|
||||
|
||||
If you have installation issues, please review the [FAQ]. You can also [create an issue] or ask for help on [discord].
|
||||
|
||||
[installation requirements]: INSTALLATION.md#installation-requirements
|
||||
[installation requirements]: INSTALL_REQUIREMENTS.md
|
||||
[FAQ]: ../help/FAQ.md
|
||||
[install some models]: 050_INSTALLING_MODELS.md
|
||||
[configuration docs]: ../features/CONFIGURATION.md
|
||||
|
@@ -10,7 +10,7 @@ InvokeAI is distributed as a python package on PyPI, installable with `pip`. The

### Requirements

Before you start, go through the [installation requirements].
Before you start, go through the [installation requirements](./INSTALL_REQUIREMENTS.md).

### Installation Walkthrough

@@ -79,7 +79,7 @@ Before you start, go through the [installation requirements].

1. Install the InvokeAI Package. The base command is `pip install InvokeAI --use-pep517`, but you may need to change this depending on your system and the desired features.

    - You may need to provide an [extra index URL]. Select your platform configuration using [this tool on the PyTorch website]. Copy the `--extra-index-url` string from this and append it to your install command.
    - You may need to provide an [extra index URL](https://pip.pypa.io/en/stable/cli/pip_install/#cmdoption-extra-index-url). Select your platform configuration using [this tool on the PyTorch website](https://pytorch.org/get-started/locally/). Copy the `--extra-index-url` string from this and append it to your install command.

    !!! example "Install with an extra index URL"

@@ -116,4 +116,4 @@ Before you start, go through the [installation requirements].

    !!! warning

        If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root_dir \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.
        If the virtual environment is _not_ inside the root directory, then you _must_ specify the path to the root directory with `--root \path\to\invokeai` or the `INVOKEAI_ROOT` environment variable.
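To make the manual-install hunks above concrete, here is a minimal sketch of the command with an extra index URL. It is illustrative only: the virtual environment name and the CUDA index URL are placeholders, and the real URL should be copied from the PyTorch configuration tool linked in the text.

```sh
# Illustrative sketch only -- the index URL below is a placeholder; copy the
# exact --extra-index-url value from the PyTorch "get started" selector.
python -m venv .venv
source .venv/bin/activate
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
```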
@@ -1,4 +1,4 @@

# Installation Overview
# Installation and Updating Overview

Before installing, review the [installation requirements] to ensure your system is set up properly.

@@ -6,14 +6,21 @@ See the [FAQ] for frequently-encountered installation issues.

If you need more help, join our [discord] or [create an issue].

<h2>Automatic Install</h2>
<h2>Automatic Install & Updates</h2>

✅ The automatic install is the best way to run InvokeAI. Check out the [installation guide] to get started.

⬆️ The same installer is also the best way to update InvokeAI - Simply rerun it for the same folder you installed to.

The installation process simply manages installation for the core libraries & application dependencies that run Invoke.
Any models, images, or other assets in the Invoke root folder won't be affected by the installation process.

<h2>Manual Install</h2>

If you are familiar with python and want more control over the packages that are installed, you can [install InvokeAI manually via PyPI].

Updates are managed by reinstalling the latest version through PyPi.

<h2>Developer Install</h2>

If you want to contribute to InvokeAI, consult the [developer install guide].
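The overview above only states that manual updates go through PyPI, so a one-line sketch may help. It assumes the virtual environment used for the original manual install is already activated:

```sh
# Assumes the original manual-install virtual environment is activated.
pip install --upgrade InvokeAI --use-pep517
```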
@@ -37,13 +37,13 @@ Invoke runs best with a dedicated GPU, but will fall back to running on CPU, alb

=== "Nvidia"

    ```
    Any GPU with at least 8GB VRAM. Linux only.
    Any GPU with at least 8GB VRAM.
    ```

=== "AMD"

    ```
    Any GPU with at least 16GB VRAM.
    Any GPU with at least 16GB VRAM. Linux only.
    ```

=== "Mac"
@@ -18,6 +18,7 @@ from ..services.boards.boards_default import BoardService
from ..services.bulk_download.bulk_download_default import BulkDownloadService
from ..services.config import InvokeAIAppConfig
from ..services.download import DownloadQueueService
from ..services.events.events_fastapievents import FastAPIEventService
from ..services.image_files.image_files_disk import DiskImageFileStorage
from ..services.image_records.image_records_sqlite import SqliteImageRecordStorage
from ..services.images.images_default import ImageService
@@ -33,7 +34,6 @@ from ..services.session_processor.session_processor_default import DefaultSessio
from ..services.session_queue.session_queue_sqlite import SqliteSessionQueue
from ..services.urls.urls_default import LocalUrlService
from ..services.workflow_records.workflow_records_sqlite import SqliteWorkflowRecordsStorage
from .events import FastAPIEventService


# TODO: is there a better way to achieve this?
@@ -1,52 +0,0 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import asyncio
import threading
from queue import Empty, Queue
from typing import Any

from fastapi_events.dispatcher import dispatch

from ..services.events.events_base import EventServiceBase


class FastAPIEventService(EventServiceBase):
    event_handler_id: int
    __queue: Queue
    __stop_event: threading.Event

    def __init__(self, event_handler_id: int) -> None:
        self.event_handler_id = event_handler_id
        self.__queue = Queue()
        self.__stop_event = threading.Event()
        asyncio.create_task(self.__dispatch_from_queue(stop_event=self.__stop_event))

        super().__init__()

    def stop(self, *args, **kwargs):
        self.__stop_event.set()
        self.__queue.put(None)

    def dispatch(self, event_name: str, payload: Any) -> None:
        self.__queue.put({"event_name": event_name, "payload": payload})

    async def __dispatch_from_queue(self, stop_event: threading.Event):
        """Get events on from the queue and dispatch them, from the correct thread"""
        while not stop_event.is_set():
            try:
                event = self.__queue.get(block=False)
                if not event:  # Probably stopping
                    continue

                dispatch(
                    event.get("event_name"),
                    payload=event.get("payload"),
                    middleware_id=self.event_handler_id,
                )

            except Empty:
                await asyncio.sleep(0.1)
                pass

            except asyncio.CancelledError as e:
                raise e  # Raise a proper error
@@ -13,7 +13,6 @@ from pydantic import BaseModel, Field
from invokeai.app.invocations.upscale import ESRGAN_MODELS
from invokeai.app.services.invocation_cache.invocation_cache_common import InvocationCacheStatus
from invokeai.backend.image_util.infill_methods.patchmatch import PatchMatch
from invokeai.backend.image_util.safety_checker import SafetyChecker
from invokeai.backend.util.logging import logging
from invokeai.version import __version__

@@ -109,9 +108,7 @@ async def get_config() -> AppConfig:
        upscaling_models.append(str(Path(model).stem))
    upscaler = Upscaler(upscaling_method="esrgan", upscaling_models=upscaling_models)

    nsfw_methods = []
    if SafetyChecker.safety_checker_available():
        nsfw_methods.append("nsfw_checker")
    nsfw_methods = ["nsfw_checker"]

    watermarking_methods = ["invisible_watermark"]
@ -6,13 +6,12 @@ from fastapi import BackgroundTasks, Body, HTTPException, Path, Query, Request,
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi.routing import APIRouter
|
||||
from PIL import Image
|
||||
from pydantic import BaseModel, Field, ValidationError
|
||||
from pydantic import BaseModel, Field, JsonValue
|
||||
|
||||
from invokeai.app.invocations.fields import MetadataField, MetadataFieldValidator
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin
|
||||
from invokeai.app.services.images.images_common import ImageDTO, ImageUrlsDTO
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID, WorkflowWithoutIDValidator
|
||||
|
||||
from ..dependencies import ApiDependencies
|
||||
|
||||
@ -42,13 +41,17 @@ async def upload_image(
|
||||
board_id: Optional[str] = Query(default=None, description="The board to add this image to, if any"),
|
||||
session_id: Optional[str] = Query(default=None, description="The session ID associated with this upload, if any"),
|
||||
crop_visible: Optional[bool] = Query(default=False, description="Whether to crop the image"),
|
||||
metadata: Optional[JsonValue] = Body(
|
||||
default=None, description="The metadata to associate with the image", embed=True
|
||||
),
|
||||
) -> ImageDTO:
|
||||
"""Uploads an image"""
|
||||
if not file.content_type or not file.content_type.startswith("image"):
|
||||
raise HTTPException(status_code=415, detail="Not an image")
|
||||
|
||||
metadata = None
|
||||
workflow = None
|
||||
_metadata = None
|
||||
_workflow = None
|
||||
_graph = None
|
||||
|
||||
contents = await file.read()
|
||||
try:
|
||||
@ -62,22 +65,28 @@ async def upload_image(
|
||||
|
||||
# TODO: retain non-invokeai metadata on upload?
|
||||
# attempt to parse metadata from image
|
||||
metadata_raw = pil_image.info.get("invokeai_metadata", None)
|
||||
if metadata_raw:
|
||||
try:
|
||||
metadata = MetadataFieldValidator.validate_json(metadata_raw)
|
||||
except ValidationError:
|
||||
ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
|
||||
pass
|
||||
metadata_raw = metadata if isinstance(metadata, str) else pil_image.info.get("invokeai_metadata", None)
|
||||
if isinstance(metadata_raw, str):
|
||||
_metadata = metadata_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
|
||||
pass
|
||||
|
||||
# attempt to parse workflow from image
|
||||
workflow_raw = pil_image.info.get("invokeai_workflow", None)
|
||||
if workflow_raw is not None:
|
||||
try:
|
||||
workflow = WorkflowWithoutIDValidator.validate_json(workflow_raw)
|
||||
except ValidationError:
|
||||
ApiDependencies.invoker.services.logger.warn("Failed to parse metadata for uploaded image")
|
||||
pass
|
||||
if isinstance(workflow_raw, str):
|
||||
_workflow = workflow_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.warn("Failed to parse workflow for uploaded image")
|
||||
pass
|
||||
|
||||
# attempt to extract graph from image
|
||||
graph_raw = pil_image.info.get("invokeai_graph", None)
|
||||
if isinstance(graph_raw, str):
|
||||
_graph = graph_raw
|
||||
else:
|
||||
ApiDependencies.invoker.services.logger.warn("Failed to parse graph for uploaded image")
|
||||
pass
|
||||
|
||||
try:
|
||||
image_dto = ApiDependencies.invoker.services.images.create(
|
||||
@ -86,8 +95,9 @@ async def upload_image(
|
||||
image_category=image_category,
|
||||
session_id=session_id,
|
||||
board_id=board_id,
|
||||
metadata=metadata,
|
||||
workflow=workflow,
|
||||
metadata=_metadata,
|
||||
workflow=_workflow,
|
||||
graph=_graph,
|
||||
is_intermediate=is_intermediate,
|
||||
)
|
||||
|
||||
@ -185,14 +195,21 @@ async def get_image_metadata(
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
||||
class WorkflowAndGraphResponse(BaseModel):
|
||||
workflow: Optional[str] = Field(description="The workflow used to generate the image, as stringified JSON")
|
||||
graph: Optional[str] = Field(description="The graph used to generate the image, as stringified JSON")
|
||||
|
||||
|
||||
@images_router.get(
|
||||
"/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=Optional[WorkflowWithoutID]
|
||||
"/i/{image_name}/workflow", operation_id="get_image_workflow", response_model=WorkflowAndGraphResponse
|
||||
)
|
||||
async def get_image_workflow(
|
||||
image_name: str = Path(description="The name of image whose workflow to get"),
|
||||
) -> Optional[WorkflowWithoutID]:
|
||||
) -> WorkflowAndGraphResponse:
|
||||
try:
|
||||
return ApiDependencies.invoker.services.images.get_workflow(image_name)
|
||||
workflow = ApiDependencies.invoker.services.images.get_workflow(image_name)
|
||||
graph = ApiDependencies.invoker.services.images.get_graph(image_name)
|
||||
return WorkflowAndGraphResponse(workflow=workflow, graph=graph)
|
||||
except Exception:
|
||||
raise HTTPException(status_code=404)
|
||||
|
||||
|
@ -6,7 +6,7 @@ import pathlib
|
||||
import shutil
|
||||
import traceback
|
||||
from copy import deepcopy
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any, Dict, List, Optional, Type
|
||||
|
||||
from fastapi import Body, Path, Query, Response, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
@ -16,7 +16,8 @@ from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field
|
||||
from starlette.exceptions import HTTPException
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.services.model_install import ModelInstallJob
|
||||
from invokeai.app.services.model_images.model_images_common import ModelImageFileNotFoundException
|
||||
from invokeai.app.services.model_install.model_install_common import ModelInstallJob
|
||||
from invokeai.app.services.model_records import (
|
||||
DuplicateModelException,
|
||||
InvalidModelException,
|
||||
@ -52,6 +53,13 @@ class ModelsList(BaseModel):
|
||||
model_config = ConfigDict(use_enum_values=True)
|
||||
|
||||
|
||||
def add_cover_image_to_model_config(config: AnyModelConfig, dependencies: Type[ApiDependencies]) -> AnyModelConfig:
|
||||
"""Add a cover image URL to a model configuration."""
|
||||
cover_image = dependencies.invoker.services.model_images.get_url(config.key)
|
||||
config.cover_image = cover_image
|
||||
return config
|
||||
|
||||
|
||||
##############################################################################
|
||||
# These are example inputs and outputs that are used in places where Swagger
|
||||
# is unable to generate a correct example.
|
||||
@ -118,8 +126,7 @@ async def list_model_records(
|
||||
record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
|
||||
)
|
||||
for model in found_models:
|
||||
cover_image = ApiDependencies.invoker.services.model_images.get_url(model.key)
|
||||
model.cover_image = cover_image
|
||||
model = add_cover_image_to_model_config(model, ApiDependencies)
|
||||
return ModelsList(models=found_models)
|
||||
|
||||
|
||||
@ -160,12 +167,9 @@ async def get_model_record(
|
||||
key: str = Path(description="Key of the model record to fetch."),
|
||||
) -> AnyModelConfig:
|
||||
"""Get a model record"""
|
||||
record_store = ApiDependencies.invoker.services.model_manager.store
|
||||
try:
|
||||
config: AnyModelConfig = record_store.get_model(key)
|
||||
cover_image = ApiDependencies.invoker.services.model_images.get_url(key)
|
||||
config.cover_image = cover_image
|
||||
return config
|
||||
config = ApiDependencies.invoker.services.model_manager.store.get_model(key)
|
||||
return add_cover_image_to_model_config(config, ApiDependencies)
|
||||
except UnknownModelException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
|
||||
@ -294,14 +298,15 @@ async def update_model_record(
|
||||
installer = ApiDependencies.invoker.services.model_manager.install
|
||||
try:
|
||||
record_store.update_model(key, changes=changes)
|
||||
model_response: AnyModelConfig = installer.sync_model_path(key)
|
||||
config = installer.sync_model_path(key)
|
||||
config = add_cover_image_to_model_config(config, ApiDependencies)
|
||||
logger.info(f"Updated model: {key}")
|
||||
except UnknownModelException as e:
|
||||
raise HTTPException(status_code=404, detail=str(e))
|
||||
except ValueError as e:
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
return model_response
|
||||
return config
|
||||
|
||||
|
||||
@model_manager_router.get(
|
||||
@ -648,6 +653,14 @@ async def convert_model(
|
||||
logger.error(str(e))
|
||||
raise HTTPException(status_code=409, detail=str(e))
|
||||
|
||||
# Update the model image if the model had one
|
||||
try:
|
||||
model_image = ApiDependencies.invoker.services.model_images.get(key)
|
||||
ApiDependencies.invoker.services.model_images.save(model_image, new_key)
|
||||
ApiDependencies.invoker.services.model_images.delete(key)
|
||||
except ModelImageFileNotFoundException:
|
||||
pass
|
||||
|
||||
# delete the original safetensors file
|
||||
installer.delete(key)
|
||||
|
||||
@ -655,7 +668,8 @@ async def convert_model(
|
||||
shutil.rmtree(cache_path)
|
||||
|
||||
# return the config record for the new diffusers directory
|
||||
new_config: AnyModelConfig = store.get_model(new_key)
|
||||
new_config = store.get_model(new_key)
|
||||
new_config = add_cover_image_to_model_config(new_config, ApiDependencies)
|
||||
return new_config
|
||||
|
||||
|
||||
|
@@ -203,6 +203,7 @@ async def get_batch_status(
    responses={
        200: {"model": SessionQueueItem},
    },
    response_model_exclude_none=True,
)
async def get_queue_item(
    queue_id: str = Path(description="The queue id to perform this operation on"),
@ -1,66 +1,131 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
from typing import Any
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.typing import Event
|
||||
from pydantic import BaseModel
|
||||
from socketio import ASGIApp, AsyncServer
|
||||
|
||||
from ..services.events.events_base import EventServiceBase
|
||||
from invokeai.app.services.events.events_common import (
|
||||
BatchEnqueuedEvent,
|
||||
BulkDownloadCompleteEvent,
|
||||
BulkDownloadErrorEvent,
|
||||
BulkDownloadEventBase,
|
||||
BulkDownloadStartedEvent,
|
||||
DownloadCancelledEvent,
|
||||
DownloadCompleteEvent,
|
||||
DownloadErrorEvent,
|
||||
DownloadProgressEvent,
|
||||
DownloadStartedEvent,
|
||||
FastAPIEvent,
|
||||
InvocationCompleteEvent,
|
||||
InvocationDenoiseProgressEvent,
|
||||
InvocationErrorEvent,
|
||||
InvocationStartedEvent,
|
||||
ModelEventBase,
|
||||
ModelInstallCancelledEvent,
|
||||
ModelInstallCompleteEvent,
|
||||
ModelInstallDownloadProgressEvent,
|
||||
ModelInstallDownloadsCompleteEvent,
|
||||
ModelInstallErrorEvent,
|
||||
ModelInstallStartedEvent,
|
||||
ModelLoadCompleteEvent,
|
||||
ModelLoadStartedEvent,
|
||||
QueueClearedEvent,
|
||||
QueueEventBase,
|
||||
QueueItemStatusChangedEvent,
|
||||
SessionCanceledEvent,
|
||||
SessionCompleteEvent,
|
||||
SessionStartedEvent,
|
||||
register_events,
|
||||
)
|
||||
|
||||
|
||||
class QueueSubscriptionEvent(BaseModel):
|
||||
queue_id: str
|
||||
|
||||
|
||||
class BulkDownloadSubscriptionEvent(BaseModel):
|
||||
bulk_download_id: str
|
||||
|
||||
|
||||
class SocketIO:
|
||||
__sio: AsyncServer
|
||||
__app: ASGIApp
|
||||
_sub_queue = "subscribe_queue"
|
||||
_unsub_queue = "unsubscribe_queue"
|
||||
|
||||
__sub_queue: str = "subscribe_queue"
|
||||
__unsub_queue: str = "unsubscribe_queue"
|
||||
|
||||
__sub_bulk_download: str = "subscribe_bulk_download"
|
||||
__unsub_bulk_download: str = "unsubscribe_bulk_download"
|
||||
_sub_bulk_download = "subscribe_bulk_download"
|
||||
_unsub_bulk_download = "unsubscribe_bulk_download"
|
||||
|
||||
def __init__(self, app: FastAPI):
|
||||
self.__sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
|
||||
self.__app = ASGIApp(socketio_server=self.__sio, socketio_path="/ws/socket.io")
|
||||
app.mount("/ws", self.__app)
|
||||
self._sio = AsyncServer(async_mode="asgi", cors_allowed_origins="*")
|
||||
self._app = ASGIApp(socketio_server=self._sio, socketio_path="/ws/socket.io")
|
||||
app.mount("/ws", self._app)
|
||||
|
||||
self.__sio.on(self.__sub_queue, handler=self._handle_sub_queue)
|
||||
self.__sio.on(self.__unsub_queue, handler=self._handle_unsub_queue)
|
||||
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._handle_queue_event)
|
||||
local_handler.register(event_name=EventServiceBase.model_event, _func=self._handle_model_event)
|
||||
self._sio.on(self._sub_queue, handler=self._handle_sub_queue)
|
||||
self._sio.on(self._unsub_queue, handler=self._handle_unsub_queue)
|
||||
self._sio.on(self._sub_bulk_download, handler=self._handle_sub_bulk_download)
|
||||
self._sio.on(self._unsub_bulk_download, handler=self._handle_unsub_bulk_download)
|
||||
|
||||
self.__sio.on(self.__sub_bulk_download, handler=self._handle_sub_bulk_download)
|
||||
self.__sio.on(self.__unsub_bulk_download, handler=self._handle_unsub_bulk_download)
|
||||
local_handler.register(event_name=EventServiceBase.bulk_download_event, _func=self._handle_bulk_download_event)
|
||||
|
||||
async def _handle_queue_event(self, event: Event):
|
||||
await self.__sio.emit(
|
||||
event=event[1]["event"],
|
||||
data=event[1]["data"],
|
||||
room=event[1]["data"]["queue_id"],
|
||||
register_events(
|
||||
{
|
||||
InvocationStartedEvent,
|
||||
InvocationDenoiseProgressEvent,
|
||||
InvocationCompleteEvent,
|
||||
InvocationErrorEvent,
|
||||
SessionStartedEvent,
|
||||
SessionCompleteEvent,
|
||||
SessionCanceledEvent,
|
||||
QueueItemStatusChangedEvent,
|
||||
BatchEnqueuedEvent,
|
||||
QueueClearedEvent,
|
||||
},
|
||||
self._handle_queue_event,
|
||||
)
|
||||
|
||||
async def _handle_sub_queue(self, sid, data, *args, **kwargs) -> None:
|
||||
if "queue_id" in data:
|
||||
await self.__sio.enter_room(sid, data["queue_id"])
|
||||
|
||||
async def _handle_unsub_queue(self, sid, data, *args, **kwargs) -> None:
|
||||
if "queue_id" in data:
|
||||
await self.__sio.leave_room(sid, data["queue_id"])
|
||||
|
||||
async def _handle_model_event(self, event: Event) -> None:
|
||||
await self.__sio.emit(event=event[1]["event"], data=event[1]["data"])
|
||||
|
||||
async def _handle_bulk_download_event(self, event: Event):
|
||||
await self.__sio.emit(
|
||||
event=event[1]["event"],
|
||||
data=event[1]["data"],
|
||||
room=event[1]["data"]["bulk_download_id"],
|
||||
register_events(
|
||||
{
|
||||
DownloadCancelledEvent,
|
||||
DownloadCompleteEvent,
|
||||
DownloadErrorEvent,
|
||||
DownloadProgressEvent,
|
||||
DownloadStartedEvent,
|
||||
ModelLoadStartedEvent,
|
||||
ModelLoadCompleteEvent,
|
||||
ModelInstallDownloadProgressEvent,
|
||||
ModelInstallDownloadsCompleteEvent,
|
||||
ModelInstallStartedEvent,
|
||||
ModelInstallCompleteEvent,
|
||||
ModelInstallCancelledEvent,
|
||||
ModelInstallErrorEvent,
|
||||
},
|
||||
self._handle_model_event,
|
||||
)
|
||||
|
||||
async def _handle_sub_bulk_download(self, sid, data, *args, **kwargs):
|
||||
if "bulk_download_id" in data:
|
||||
await self.__sio.enter_room(sid, data["bulk_download_id"])
|
||||
register_events(
|
||||
{BulkDownloadStartedEvent, BulkDownloadCompleteEvent, BulkDownloadErrorEvent},
|
||||
self._handle_bulk_image_download_event,
|
||||
)
|
||||
|
||||
async def _handle_unsub_bulk_download(self, sid, data, *args, **kwargs):
|
||||
if "bulk_download_id" in data:
|
||||
await self.__sio.leave_room(sid, data["bulk_download_id"])
|
||||
async def _handle_sub_queue(self, sid: str, data: Any) -> None:
|
||||
await self._sio.enter_room(sid, QueueSubscriptionEvent(**data).queue_id)
|
||||
|
||||
async def _handle_unsub_queue(self, sid: str, data: Any) -> None:
|
||||
await self._sio.leave_room(sid, QueueSubscriptionEvent(**data).queue_id)
|
||||
|
||||
async def _handle_sub_bulk_download(self, sid: str, data: Any) -> None:
|
||||
await self._sio.enter_room(sid, BulkDownloadSubscriptionEvent(**data).bulk_download_id)
|
||||
|
||||
async def _handle_unsub_bulk_download(self, sid: str, data: Any) -> None:
|
||||
await self._sio.leave_room(sid, BulkDownloadSubscriptionEvent(**data).bulk_download_id)
|
||||
|
||||
async def _handle_queue_event(self, event: FastAPIEvent[QueueEventBase]):
|
||||
event_name, payload = event
|
||||
await self._sio.emit(event=event_name, data=payload.model_dump(mode="json"), room=payload.queue_id)
|
||||
|
||||
async def _handle_model_event(self, event: FastAPIEvent[ModelEventBase]) -> None:
|
||||
event_name, payload = event
|
||||
await self._sio.emit(event=event_name, data=payload.model_dump(mode="json"))
|
||||
|
||||
async def _handle_bulk_image_download_event(self, event: FastAPIEvent[BulkDownloadEventBase]) -> None:
|
||||
event_name, payload = event
|
||||
await self._sio.emit(event=event_name, data=payload.model_dump(mode="json"), room=payload.bulk_download_id)
|
||||
|
@ -27,8 +27,9 @@ import invokeai.frontend.web as web_dir
|
||||
from invokeai.app.api.no_cache_staticfiles import NoCacheStaticFiles
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
from invokeai.app.services.events.events_common import EventBase
|
||||
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
|
||||
from invokeai.backend.util.devices import get_torch_device_name
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
from ..backend.util.logging import InvokeAILogger
|
||||
from .api.dependencies import ApiDependencies
|
||||
@ -63,7 +64,7 @@ logger = InvokeAILogger.get_logger(config=app_config)
|
||||
mimetypes.add_type("application/javascript", ".js")
|
||||
mimetypes.add_type("text/css", ".css")
|
||||
|
||||
torch_device_name = get_torch_device_name()
|
||||
torch_device_name = TorchDevice.get_torch_device_name()
|
||||
logger.info(f"Using torch device: {torch_device_name}")
|
||||
|
||||
|
||||
@ -164,6 +165,12 @@ def custom_openapi() -> dict[str, Any]:
|
||||
for schema_key, schema_json in additional_schemas[1]["$defs"].items():
|
||||
openapi_schema["components"]["schemas"][schema_key] = schema_json
|
||||
|
||||
openapi_schema["components"]["schemas"]["InvocationOutputMap"] = {
|
||||
"type": "object",
|
||||
"properties": {},
|
||||
"required": [],
|
||||
}
|
||||
|
||||
# Add a reference to the output type to additionalProperties of the invoker schema
|
||||
for invoker in all_invocations:
|
||||
invoker_name = invoker.__name__ # type: ignore [attr-defined] # this is a valid attribute
|
||||
@ -172,25 +179,18 @@ def custom_openapi() -> dict[str, Any]:
|
||||
invoker_schema = openapi_schema["components"]["schemas"][f"{invoker_name}"]
|
||||
outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}
|
||||
invoker_schema["output"] = outputs_ref
|
||||
openapi_schema["components"]["schemas"]["InvocationOutputMap"]["properties"][invoker.get_type()] = outputs_ref
|
||||
openapi_schema["components"]["schemas"]["InvocationOutputMap"]["required"].append(invoker.get_type())
|
||||
invoker_schema["class"] = "invocation"
|
||||
|
||||
# This code no longer seems to be necessary?
|
||||
# Leave it here just in case
|
||||
#
|
||||
# from invokeai.backend.model_manager import get_model_config_formats
|
||||
# formats = get_model_config_formats()
|
||||
# for model_config_name, enum_set in formats.items():
|
||||
|
||||
# if model_config_name in openapi_schema["components"]["schemas"]:
|
||||
# # print(f"Config with name {name} already defined")
|
||||
# continue
|
||||
|
||||
# openapi_schema["components"]["schemas"][model_config_name] = {
|
||||
# "title": model_config_name,
|
||||
# "description": "An enumeration.",
|
||||
# "type": "string",
|
||||
# "enum": [v.value for v in enum_set],
|
||||
# }
|
||||
# Add all event schemas
|
||||
for event in sorted(EventBase.get_events(), key=lambda e: e.__name__):
|
||||
json_schema = event.model_json_schema(mode="serialization", ref_template="#/components/schemas/{model}")
|
||||
if "$defs" in json_schema:
|
||||
for schema_key, schema in json_schema["$defs"].items():
|
||||
openapi_schema["components"]["schemas"][schema_key] = schema
|
||||
del json_schema["$defs"]
|
||||
openapi_schema["components"]["schemas"][event.__name__] = json_schema
|
||||
|
||||
app.openapi_schema = openapi_schema
|
||||
return app.openapi_schema
|
||||
|
@@ -24,7 +24,7 @@ from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
    ConditioningFieldData,
    SDXLConditioningInfo,
)
from invokeai.backend.util.devices import torch_dtype
from invokeai.backend.util.devices import TorchDevice

from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from .model import CLIPField

@@ -99,7 +99,7 @@ class CompelInvocation(BaseInvocation):
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            textual_inversion_manager=ti_manager,
            dtype_for_device_getter=torch_dtype,
            dtype_for_device_getter=TorchDevice.choose_torch_dtype,
            truncate_long_prompts=False,
        )

@@ -193,7 +193,7 @@ class SDXLPromptInvocationBase:
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            textual_inversion_manager=ti_manager,
            dtype_for_device_getter=torch_dtype,
            dtype_for_device_getter=TorchDevice.choose_torch_dtype,
            truncate_long_prompts=False,  # TODO:
            returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,  # TODO: clip skip
            requires_pooled=get_pooled,
@ -24,7 +24,6 @@ from pydantic import BaseModel, Field, field_validator, model_validator
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
ImageField,
|
||||
Input,
|
||||
InputField,
|
||||
OutputField,
|
||||
UIType,
|
||||
@ -35,22 +34,16 @@ from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageOutput
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContext
|
||||
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES, heuristic_resize
|
||||
from invokeai.backend.image_util.canny import get_canny_edges
|
||||
from invokeai.backend.image_util.depth_anything import DepthAnythingDetector
|
||||
from invokeai.backend.image_util.dw_openpose import DWOpenposeDetector
|
||||
from invokeai.backend.image_util.hed import HEDProcessor
|
||||
from invokeai.backend.image_util.lineart import LineartProcessor
|
||||
from invokeai.backend.image_util.lineart_anime import LineartAnimeProcessor
|
||||
from invokeai.backend.image_util.util import np_to_pil, pil_to_np
|
||||
|
||||
from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
|
||||
CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
|
||||
CONTROLNET_RESIZE_VALUES = Literal[
|
||||
"just_resize",
|
||||
"crop_resize",
|
||||
"fill_resize",
|
||||
"just_resize_simple",
|
||||
]
|
||||
from .baseinvocation import BaseInvocation, BaseInvocationOutput, Classification, invocation, invocation_output
|
||||
|
||||
|
||||
class ControlField(BaseModel):
|
||||
@ -86,13 +79,13 @@ class ControlOutput(BaseInvocationOutput):
|
||||
control: ControlField = OutputField(description=FieldDescriptions.control)
|
||||
|
||||
|
||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.1")
|
||||
@invocation("controlnet", title="ControlNet", tags=["controlnet"], category="controlnet", version="1.1.2")
|
||||
class ControlNetInvocation(BaseInvocation):
|
||||
"""Collects ControlNet info to pass to other nodes"""
|
||||
|
||||
image: ImageField = InputField(description="The control image")
|
||||
control_model: ModelIdentifierField = InputField(
|
||||
description=FieldDescriptions.controlnet_model, input=Input.Direct, ui_type=UIType.ControlNetModel
|
||||
description=FieldDescriptions.controlnet_model, ui_type=UIType.ControlNetModel
|
||||
)
|
||||
control_weight: Union[float, List[float]] = InputField(
|
||||
default=1.0, ge=-1, le=2, description="The weight given to the ControlNet"
|
||||
@ -171,13 +164,13 @@ class ImageProcessorInvocation(BaseInvocation, WithMetadata, WithBoard):
|
||||
title="Canny Processor",
|
||||
tags=["controlnet", "canny"],
|
||||
category="controlnet",
|
||||
version="1.3.2",
|
||||
version="1.3.3",
|
||||
)
|
||||
class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Canny edge detection for ControlNet"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
low_threshold: int = InputField(
|
||||
default=100, ge=0, le=255, description="The low threshold of the Canny pixel gradient (0-255)"
|
||||
)
|
||||
@ -205,13 +198,13 @@ class CannyImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="HED (softedge) Processor",
|
||||
tags=["controlnet", "hed", "softedge"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies HED edge detection to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
# safe not supported in controlnet_aux v0.0.3
|
||||
# safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
||||
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
||||
@ -234,13 +227,13 @@ class HedImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Lineart Processor",
|
||||
tags=["controlnet", "lineart"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
coarse: bool = InputField(default=False, description="Whether to use coarse mode")
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
@ -256,13 +249,13 @@ class LineartImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Lineart Anime Processor",
|
||||
tags=["controlnet", "lineart", "anime"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies line art anime processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image) -> Image.Image:
|
||||
processor = LineartAnimeProcessor()
|
||||
@ -279,15 +272,15 @@ class LineartAnimeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Midas Depth Processor",
|
||||
tags=["controlnet", "midas"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
version="1.2.4",
|
||||
)
|
||||
class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Midas depth processing to image"""
|
||||
|
||||
a_mult: float = InputField(default=2.0, ge=0, description="Midas parameter `a_mult` (a = a_mult * PI)")
|
||||
bg_th: float = InputField(default=0.1, ge=0, description="Midas parameter `bg_th`")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
# depth_and_normal not supported in controlnet_aux v0.0.3
|
||||
# depth_and_normal: bool = InputField(default=False, description="whether to use depth and normal mode")
|
||||
|
||||
@ -310,13 +303,13 @@ class MidasDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Normal BAE Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies NormalBae processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
normalbae_processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
|
||||
@ -327,13 +320,13 @@ class NormalbaeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.2"
|
||||
"mlsd_image_processor", title="MLSD Processor", tags=["controlnet", "mlsd"], category="controlnet", version="1.2.3"
|
||||
)
|
||||
class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies MLSD processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
thr_v: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_v`")
|
||||
thr_d: float = InputField(default=0.1, ge=0, description="MLSD parameter `thr_d`")
|
||||
|
||||
@ -350,13 +343,13 @@ class MlsdImageProcessorInvocation(ImageProcessorInvocation):
|
||||
|
||||
|
||||
@invocation(
|
||||
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.2"
|
||||
"pidi_image_processor", title="PIDI Processor", tags=["controlnet", "pidi"], category="controlnet", version="1.2.3"
|
||||
)
|
||||
class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies PIDI processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
safe: bool = InputField(default=False, description=FieldDescriptions.safe_mode)
|
||||
scribble: bool = InputField(default=False, description=FieldDescriptions.scribble_mode)
|
||||
|
||||
@ -377,13 +370,13 @@ class PidiImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Content Shuffle Processor",
|
||||
tags=["controlnet", "contentshuffle"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies content shuffle processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
h: int = InputField(default=512, ge=0, description="Content shuffle `h` parameter")
|
||||
w: int = InputField(default=512, ge=0, description="Content shuffle `w` parameter")
|
||||
f: int = InputField(default=256, ge=0, description="Content shuffle `f` parameter")
|
||||
@ -407,7 +400,7 @@ class ContentShuffleImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Zoe (Depth) Processor",
|
||||
tags=["controlnet", "zoe", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies Zoe depth processing to image"""
|
||||
@ -423,15 +416,15 @@ class ZoeDepthImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Mediapipe Face Processor",
|
||||
tags=["controlnet", "mediapipe", "face"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
version="1.2.4",
|
||||
)
|
||||
class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies mediapipe face processing to image"""
|
||||
|
||||
max_faces: int = InputField(default=1, ge=1, description="Maximum number of faces to detect")
|
||||
min_confidence: float = InputField(default=0.5, ge=0, le=1, description="Minimum confidence for face detection")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
mediapipe_face_processor = MediapipeFaceDetector()
|
||||
@ -450,7 +443,7 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Leres (Depth) Processor",
|
||||
tags=["controlnet", "leres", "depth"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies leres processing to image"""
|
||||
@ -458,8 +451,8 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
thr_a: float = InputField(default=0, description="Leres parameter `thr_a`")
|
||||
thr_b: float = InputField(default=0, description="Leres parameter `thr_b`")
|
||||
boost: bool = InputField(default=False, description="Whether to use boost mode")
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators")
|
||||
@ -479,7 +472,7 @@ class LeresImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Tile Resample Processor",
|
||||
tags=["controlnet", "tile"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Tile resampler processor"""
|
||||
@ -519,13 +512,13 @@ class TileResamplerProcessorInvocation(ImageProcessorInvocation):
|
||||
title="Segment Anything Processor",
|
||||
tags=["controlnet", "segmentanything"],
|
||||
category="controlnet",
|
||||
version="1.2.3",
|
||||
version="1.2.4",
|
||||
)
|
||||
class SegmentAnythingProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Applies segment anything processing to image"""
|
||||
|
||||
detect_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
detect_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.detect_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image):
|
||||
# segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints")
|
||||
@ -566,12 +559,12 @@ class SamDetectorReproducibleColors(SamDetector):
|
||||
title="Color Map Processor",
|
||||
tags=["controlnet"],
|
||||
category="controlnet",
|
||||
version="1.2.2",
|
||||
version="1.2.3",
|
||||
)
|
||||
class ColorMapImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a color map from the provided image"""
|
||||
|
||||
color_map_tile_size: int = InputField(default=64, ge=0, description=FieldDescriptions.tile_size)
|
||||
color_map_tile_size: int = InputField(default=64, ge=1, description=FieldDescriptions.tile_size)
|
||||
|
||||
def run_processor(self, image: Image.Image):
|
||||
np_image = np.array(image, dtype=np.uint8)
|
||||
@ -598,7 +591,7 @@ DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"]
|
||||
title="Depth Anything Processor",
|
||||
tags=["controlnet", "depth", "depth anything"],
|
||||
category="controlnet",
|
||||
version="1.1.1",
|
||||
version="1.1.2",
|
||||
)
|
||||
class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates a depth map based on the Depth Anything algorithm"""
|
||||
@ -606,7 +599,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
model_size: DEPTH_ANYTHING_MODEL_SIZES = InputField(
|
||||
default="small", description="The size of the depth model to use"
|
||||
)
|
||||
resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res)
|
||||
resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image):
|
||||
depth_anything_detector = DepthAnythingDetector()
|
||||
@ -621,7 +614,7 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation):
|
||||
title="DW Openpose Image Processor",
|
||||
tags=["controlnet", "dwpose", "openpose"],
|
||||
category="controlnet",
|
||||
version="1.1.0",
|
||||
version="1.1.1",
|
||||
)
|
||||
class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
"""Generates an openpose pose from an image using DWPose"""
|
||||
@ -629,7 +622,7 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
draw_body: bool = InputField(default=True)
|
||||
draw_face: bool = InputField(default=False)
|
||||
draw_hands: bool = InputField(default=False)
|
||||
image_resolution: int = InputField(default=512, ge=0, description=FieldDescriptions.image_res)
|
||||
image_resolution: int = InputField(default=512, ge=1, description=FieldDescriptions.image_res)
|
||||
|
||||
def run_processor(self, image: Image.Image):
|
||||
dw_openpose = DWOpenposeDetector()
|
||||
@ -641,3 +634,27 @@ class DWOpenposeImageProcessorInvocation(ImageProcessorInvocation):
|
||||
resolution=self.image_resolution,
|
||||
)
|
||||
return processed_image
|
||||
|
||||
|
||||
@invocation(
|
||||
"heuristic_resize",
|
||||
title="Heuristic Resize",
|
||||
tags=["image, controlnet"],
|
||||
category="image",
|
||||
version="1.0.1",
|
||||
classification=Classification.Prototype,
|
||||
)
|
||||
class HeuristicResizeInvocation(BaseInvocation):
|
||||
"""Resize an image using a heuristic method. Preserves edge maps."""
|
||||
|
||||
image: ImageField = InputField(description="The image to resize")
|
||||
width: int = InputField(default=512, ge=1, description="The width to resize to (px)")
|
||||
height: int = InputField(default=512, ge=1, description="The height to resize to (px)")
|
||||
|
||||
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||
image = context.images.get_pil(self.image.image_name, "RGB")
|
||||
np_img = pil_to_np(image)
|
||||
np_resized = heuristic_resize(np_img, (self.width, self.height))
|
||||
resized = np_to_pil(np_resized)
|
||||
image_dto = context.images.save(image=resized)
|
||||
return ImageOutput.build(image_dto)
|
||||
|
@@ -1,6 +1,5 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from pathlib import Path
from typing import Literal, Optional

import cv2

@@ -504,7 +503,7 @@ class ImageInverseLerpInvocation(BaseInvocation, WithMetadata, WithBoard):
    title="Blur NSFW Image",
    tags=["image", "nsfw"],
    category="image",
    version="1.2.2",
    version="1.2.3",
)
class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):
    """Add blur to NSFW-flagged images"""

@@ -516,23 +515,12 @@ class ImageNSFWBlurInvocation(BaseInvocation, WithMetadata, WithBoard):

        logger = context.logger
        logger.debug("Running NSFW checker")
        if SafetyChecker.has_nsfw_concept(image):
            logger.info("A potentially NSFW image has been detected. Image will be blurred.")
            blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
            caution = self._get_caution_img()
            blurry_image.paste(caution, (0, 0), caution)
            image = blurry_image
        image = SafetyChecker.blur_if_nsfw(image)

        image_dto = context.images.save(image=image)

        return ImageOutput.build(image_dto)

    def _get_caution_img(self) -> Image.Image:
        import invokeai.app.assets.images as image_assets

        caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
        return caution.resize((caution.width // 2, caution.height // 2))


@invocation(
    "img_watermark",
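The change above collapses the inline blur-and-caution logic into a single `SafetyChecker.blur_if_nsfw(image)` call. The following is only a rough sketch of what such a helper could do, reconstructed from the removed lines; the actual implementation in the repository may differ, and the `is_nsfw` flag and `caution_png` path stand in for the real checker call and bundled asset.

```python
from pathlib import Path

from PIL import Image, ImageFilter


def blur_if_nsfw(image: Image.Image, is_nsfw: bool, caution_png: Path) -> Image.Image:
    """Hypothetical sketch: blur an NSFW-flagged image and overlay a caution icon.

    `is_nsfw` stands in for SafetyChecker.has_nsfw_concept(image) and
    `caution_png` for the bundled caution image; both are assumptions here.
    """
    if not is_nsfw:
        return image
    # Same parameters as the removed inline code: Gaussian blur with radius 32,
    # caution overlay resized to half size and pasted at the top-left corner.
    blurry = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
    caution = Image.open(caution_png)
    caution = caution.resize((caution.width // 2, caution.height // 2))
    blurry.paste(caution, (0, 0), caution)
    return blurry
```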
@ -4,20 +4,8 @@ from typing import List, Literal, Optional, Union
|
||||
from pydantic import BaseModel, Field, field_validator, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import (
|
||||
BaseInvocation,
|
||||
BaseInvocationOutput,
|
||||
invocation,
|
||||
invocation_output,
|
||||
)
|
||||
from invokeai.app.invocations.fields import (
|
||||
FieldDescriptions,
|
||||
Input,
|
||||
InputField,
|
||||
OutputField,
|
||||
TensorField,
|
||||
UIType,
|
||||
)
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, TensorField, UIType
|
||||
from invokeai.app.invocations.model import ModelIdentifierField
|
||||
from invokeai.app.invocations.primitives import ImageField
|
||||
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
|
||||
@ -36,6 +24,7 @@ class IPAdapterField(BaseModel):
|
||||
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model to use.")
|
||||
image_encoder_model: ModelIdentifierField = Field(description="The name of the CLIP image encoder model.")
|
||||
weight: Union[float, List[float]] = Field(default=1, description="The weight given to the IP-Adapter.")
|
||||
target_blocks: List[str] = Field(default=[], description="The IP Adapter blocks to apply")
|
||||
begin_step_percent: float = Field(
|
||||
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
||||
)
|
||||
@ -69,7 +58,7 @@ class IPAdapterOutput(BaseInvocationOutput):
|
||||
CLIP_VISION_MODEL_MAP = {"ViT-H": "ip_adapter_sd_image_encoder", "ViT-G": "ip_adapter_sdxl_image_encoder"}
|
||||
|
||||
|
||||
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.3.0")
|
||||
@invocation("ip_adapter", title="IP-Adapter", tags=["ip_adapter", "control"], category="ip_adapter", version="1.4.1")
|
||||
class IPAdapterInvocation(BaseInvocation):
|
||||
"""Collects IP-Adapter info to pass to other nodes."""
|
||||
|
||||
@ -78,7 +67,6 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
ip_adapter_model: ModelIdentifierField = InputField(
|
||||
description="The IP-Adapter model.",
|
||||
title="IP-Adapter Model",
|
||||
input=Input.Direct,
|
||||
ui_order=-1,
|
||||
ui_type=UIType.IPAdapterModel,
|
||||
)
|
||||
@ -90,6 +78,9 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
weight: Union[float, List[float]] = InputField(
|
||||
default=1, description="The weight given to the IP-Adapter", title="Weight"
|
||||
)
|
||||
method: Literal["full", "style", "composition"] = InputField(
|
||||
default="full", description="The method to apply the IP-Adapter"
|
||||
)
|
||||
begin_step_percent: float = InputField(
|
||||
default=0, ge=0, le=1, description="When the IP-Adapter is first applied (% of total steps)"
|
||||
)
|
||||
@ -124,12 +115,32 @@ class IPAdapterInvocation(BaseInvocation):
|
||||
|
||||
image_encoder_model = self._get_image_encoder(context, image_encoder_model_name)
|
||||
|
||||
if self.method == "style":
|
||||
if ip_adapter_info.base == "sd-1":
|
||||
target_blocks = ["up_blocks.1"]
|
||||
elif ip_adapter_info.base == "sdxl":
|
||||
target_blocks = ["up_blocks.0.attentions.1"]
|
||||
else:
|
||||
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
|
||||
elif self.method == "composition":
|
||||
if ip_adapter_info.base == "sd-1":
|
||||
target_blocks = ["down_blocks.2", "mid_block"]
|
||||
elif ip_adapter_info.base == "sdxl":
|
||||
target_blocks = ["down_blocks.2.attentions.1"]
|
||||
else:
|
||||
raise ValueError(f"Unsupported IP-Adapter base type: '{ip_adapter_info.base}'.")
|
||||
elif self.method == "full":
|
||||
target_blocks = ["block"]
|
||||
else:
|
||||
raise ValueError(f"Unexpected IP-Adapter method: '{self.method}'.")
|
||||
|
||||
return IPAdapterOutput(
|
||||
ip_adapter=IPAdapterField(
|
||||
image=self.image,
|
||||
ip_adapter_model=self.ip_adapter_model,
|
||||
image_encoder_model=ModelIdentifierField.from_config(image_encoder_model),
|
||||
weight=self.weight,
|
||||
target_blocks=target_blocks,
|
||||
begin_step_percent=self.begin_step_percent,
|
||||
end_step_percent=self.end_step_percent,
|
||||
mask=self.mask,
|
||||
|
@ -3,7 +3,7 @@ import inspect
|
||||
import math
|
||||
from contextlib import ExitStack
|
||||
from functools import singledispatchmethod
|
||||
from typing import Any, Iterator, List, Literal, Optional, Tuple, Union
|
||||
from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union
|
||||
|
||||
import einops
|
||||
import numpy as np
|
||||
@ -11,7 +11,6 @@ import numpy.typing as npt
|
||||
import torch
|
||||
import torchvision
|
||||
import torchvision.transforms as T
|
||||
from diffusers import AutoencoderKL, AutoencoderTiny
|
||||
from diffusers.configuration_utils import ConfigMixin
|
||||
from diffusers.image_processor import VaeImageProcessor
|
||||
from diffusers.models.adapter import T2IAdapter
|
||||
@ -21,9 +20,12 @@ from diffusers.models.attention_processor import (
|
||||
LoRAXFormersAttnProcessor,
|
||||
XFormersAttnProcessor,
|
||||
)
|
||||
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
|
||||
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
|
||||
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
|
||||
from diffusers.schedulers import DPMSolverSDEScheduler
|
||||
from diffusers.schedulers import SchedulerMixin as Scheduler
|
||||
from diffusers.schedulers.scheduling_dpmsolver_sde import DPMSolverSDEScheduler
|
||||
from diffusers.schedulers.scheduling_tcd import TCDScheduler
|
||||
from diffusers.schedulers.scheduling_utils import SchedulerMixin as Scheduler
|
||||
from PIL import Image, ImageFilter
|
||||
from pydantic import field_validator
|
||||
from torchvision.transforms.functional import resize as tv_resize
|
||||
@ -51,6 +53,7 @@ from invokeai.app.util.controlnet_utils import prepare_control_image
|
||||
from invokeai.backend.ip_adapter.ip_adapter import IPAdapter, IPAdapterPlus
|
||||
from invokeai.backend.lora import LoRAModelRaw
|
||||
from invokeai.backend.model_manager import BaseModelType, LoadedModel
|
||||
from invokeai.backend.model_manager.config import MainConfigBase, ModelVariantType
|
||||
from invokeai.backend.model_patcher import ModelPatcher
|
||||
from invokeai.backend.stable_diffusion import PipelineIntermediateState, set_seamless
|
||||
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
|
||||
@ -72,15 +75,12 @@ from ...backend.stable_diffusion.diffusers_pipeline import (
|
||||
image_resized_to_grid_as_tensor,
|
||||
)
|
||||
from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
|
||||
from ...backend.util.devices import choose_precision, choose_torch_device
|
||||
from ...backend.util.devices import TorchDevice
|
||||
from .baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
|
||||
from .controlnet_image_processors import ControlField
|
||||
from .model import ModelIdentifierField, UNetField, VAEField
|
||||
|
||||
if choose_torch_device() == torch.device("mps"):
|
||||
from torch import mps
|
||||
|
||||
DEFAULT_PRECISION = choose_precision(choose_torch_device())
|
||||
DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
|
||||
|
||||
|
||||
@invocation_output("scheduler_output")
|
||||
@ -188,7 +188,7 @@ class GradientMaskOutput(BaseInvocationOutput):
|
||||
title="Create Gradient Mask",
|
||||
tags=["mask", "denoise"],
|
||||
category="latents",
|
||||
version="1.0.0",
|
||||
version="1.1.0",
|
||||
)
|
||||
class CreateGradientMaskInvocation(BaseInvocation):
|
||||
"""Creates mask for denoising model run."""
|
||||
@ -201,6 +201,32 @@ class CreateGradientMaskInvocation(BaseInvocation):
|
||||
minimum_denoise: float = InputField(
|
||||
default=0.0, ge=0, le=1, description="Minimum denoise level for the coherence region", ui_order=4
|
||||
)
|
||||
image: Optional[ImageField] = InputField(
|
||||
default=None,
|
||||
description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
|
||||
title="[OPTIONAL] Image",
|
||||
ui_order=6,
|
||||
)
|
||||
unet: Optional[UNetField] = InputField(
|
||||
description="OPTIONAL: If the Unet is a specialized Inpainting model, masked_latents will be generated from the image with the VAE",
|
||||
default=None,
|
||||
input=Input.Connection,
|
||||
title="[OPTIONAL] UNet",
|
||||
ui_order=5,
|
||||
)
|
||||
vae: Optional[VAEField] = InputField(
|
||||
default=None,
|
||||
description="OPTIONAL: Only connect for specialized Inpainting models, masked_latents will be generated from the image with the VAE",
|
||||
title="[OPTIONAL] VAE",
|
||||
input=Input.Connection,
|
||||
ui_order=7,
|
||||
)
|
||||
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
|
||||
fp32: bool = InputField(
|
||||
default=DEFAULT_PRECISION == "float32",
|
||||
description=FieldDescriptions.fp32,
|
||||
ui_order=9,
|
||||
)
|
||||
|
||||
@torch.no_grad()
|
||||
def invoke(self, context: InvocationContext) -> GradientMaskOutput:
|
||||
@ -236,8 +262,27 @@ class CreateGradientMaskInvocation(BaseInvocation):
|
||||
expanded_mask_image = Image.fromarray((expanded_mask.squeeze(0).numpy() * 255).astype(np.uint8), mode="L")
|
||||
expanded_image_dto = context.images.save(expanded_mask_image)
|
||||
|
||||
masked_latents_name = None
|
||||
if self.unet is not None and self.vae is not None and self.image is not None:
|
||||
# all three fields must be present at the same time
|
||||
main_model_config = context.models.get_config(self.unet.unet.key)
|
||||
assert isinstance(main_model_config, MainConfigBase)
|
||||
if main_model_config.variant is ModelVariantType.Inpaint:
|
||||
mask = blur_tensor
|
||||
vae_info: LoadedModel = context.models.load(self.vae.vae)
|
||||
image = context.images.get_pil(self.image.image_name)
|
||||
image_tensor = image_resized_to_grid_as_tensor(image.convert("RGB"))
|
||||
if image_tensor.dim() == 3:
|
||||
image_tensor = image_tensor.unsqueeze(0)
|
||||
img_mask = tv_resize(mask, image_tensor.shape[-2:], T.InterpolationMode.BILINEAR, antialias=False)
|
||||
masked_image = image_tensor * torch.where(img_mask < 0.5, 0.0, 1.0)
|
||||
masked_latents = ImageToLatentsInvocation.vae_encode(
|
||||
vae_info, self.fp32, self.tiled, masked_image.clone()
|
||||
)
|
||||
masked_latents_name = context.tensors.save(tensor=masked_latents)
|
||||
|
||||
return GradientMaskOutput(
|
||||
denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=None, gradient=True),
|
||||
denoise_mask=DenoiseMaskField(mask_name=mask_name, masked_latents_name=masked_latents_name, gradient=True),
|
||||
expanded_mask_area=ImageField(image_name=expanded_image_dto.image_name),
|
||||
)
|
||||
|
||||
@ -298,7 +343,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
)
|
||||
steps: int = InputField(default=10, gt=0, description=FieldDescriptions.steps)
|
||||
cfg_scale: Union[float, List[float]] = InputField(
|
||||
default=7.5, ge=1, description=FieldDescriptions.cfg_scale, title="CFG Scale"
|
||||
default=7.5, description=FieldDescriptions.cfg_scale, title="CFG Scale"
|
||||
)
|
||||
denoising_start: float = InputField(
|
||||
default=0.0,
|
||||
@ -478,9 +523,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
)
|
||||
|
||||
if is_sdxl:
|
||||
return SDXLConditioningInfo(
|
||||
embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids
|
||||
), regions
|
||||
return (
|
||||
SDXLConditioningInfo(embeds=text_embedding, pooled_embeds=pooled_embedding, add_time_ids=add_time_ids),
|
||||
regions,
|
||||
)
|
||||
return BasicConditioningInfo(embeds=text_embedding), regions
|
||||
|
||||
def get_conditioning_data(
|
||||
@ -520,6 +566,11 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
dtype=unet.dtype,
|
||||
)
|
||||
|
||||
if isinstance(self.cfg_scale, list):
|
||||
assert (
|
||||
len(self.cfg_scale) == self.steps
|
||||
), "cfg_scale (list) must have the same length as the number of steps"
|
||||
|
||||
conditioning_data = TextConditioningData(
|
||||
uncond_text=uncond_text_embedding,
|
||||
cond_text=cond_text_embedding,
|
||||
@ -535,13 +586,6 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
unet: UNet2DConditionModel,
|
||||
scheduler: Scheduler,
|
||||
) -> StableDiffusionGeneratorPipeline:
|
||||
# TODO:
|
||||
# configure_model_padding(
|
||||
# unet,
|
||||
# self.seamless,
|
||||
# self.seamless_axes,
|
||||
# )
|
||||
|
||||
class FakeVae:
|
||||
class FakeVaeConfig:
|
||||
def __init__(self) -> None:
|
||||
@ -682,6 +726,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
IPAdapterData(
|
||||
ip_adapter_model=ip_adapter_model,
|
||||
weight=single_ip_adapter.weight,
|
||||
target_blocks=single_ip_adapter.target_blocks,
|
||||
begin_step_percent=single_ip_adapter.begin_step_percent,
|
||||
end_step_percent=single_ip_adapter.end_step_percent,
|
||||
ip_adapter_conditioning=IPAdapterConditioningInfo(image_prompt_embeds, uncond_image_prompt_embeds),
|
||||
@ -776,7 +821,7 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
denoising_start: float,
|
||||
denoising_end: float,
|
||||
seed: int,
|
||||
) -> Tuple[int, List[int], int]:
|
||||
) -> Tuple[int, List[int], int, Dict[str, Any]]:
|
||||
assert isinstance(scheduler, ConfigMixin)
|
||||
if scheduler.config.get("cpu_only", False):
|
||||
scheduler.set_timesteps(steps, device="cpu")
|
||||
@ -804,13 +849,15 @@ class DenoiseLatentsInvocation(BaseInvocation):
|
||||
timesteps = timesteps[t_start_idx : t_start_idx + t_end_idx]
|
||||
num_inference_steps = len(timesteps) // scheduler.order
|
||||
|
||||
scheduler_step_kwargs = {}
|
||||
scheduler_step_kwargs: Dict[str, Any] = {}
|
||||
scheduler_step_signature = inspect.signature(scheduler.step)
|
||||
if "generator" in scheduler_step_signature.parameters:
|
||||
# At some point, someone decided that schedulers that accept a generator should use the original seed with
|
||||
# all bits flipped. I don't know the original rationale for this, but now we must keep it like this for
|
||||
# reproducibility.
|
||||
scheduler_step_kwargs = {"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)}
|
||||
scheduler_step_kwargs.update({"generator": torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)})
|
||||
if isinstance(scheduler, TCDScheduler):
|
||||
scheduler_step_kwargs.update({"eta": 1.0})
|
||||
|
||||
return num_inference_steps, timesteps, init_timestep, scheduler_step_kwargs
|
||||
|
||||
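The flipped-seed generator logic in the hunk above can be sketched in isolation. This is an illustrative snippet only, not part of the diff; the `scheduler`, `device`, and `seed` names are assumed from the surrounding code:

    import inspect
    import torch

    def build_scheduler_step_kwargs(scheduler, device: torch.device, seed: int) -> dict:
        # Schedulers whose step() accepts a generator are seeded with the original
        # seed, all bits flipped, to preserve reproducibility with earlier releases.
        kwargs: dict = {}
        if "generator" in inspect.signature(scheduler.step).parameters:
            kwargs["generator"] = torch.Generator(device=device).manual_seed(seed ^ 0xFFFFFFFF)
        return kwargs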
@@ -959,9 +1006,7 @@ class DenoiseLatentsInvocation(BaseInvocation):

# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
result_latents = result_latents.to("cpu")
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
TorchDevice.empty_cache()

name = context.tensors.save(tensor=result_latents)
return LatentsOutput.build(latents_name=name, latents=result_latents, seed=None)

@@ -1028,9 +1073,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
vae.disable_tiling()

# clear memory as vae decode can request a lot
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
TorchDevice.empty_cache()

with torch.inference_mode():
# copied from diffusers pipeline

@@ -1042,9 +1085,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):

image = VaeImageProcessor.numpy_to_pil(np_image)[0]

torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
TorchDevice.empty_cache()

image_dto = context.images.save(image=image)

@@ -1083,9 +1124,7 @@ class ResizeLatentsInvocation(BaseInvocation):

def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.tensors.load(self.latents.latents_name)

# TODO:
device = choose_torch_device()
device = TorchDevice.choose_torch_device()

resized_latents = torch.nn.functional.interpolate(
latents.to(device),

@@ -1096,9 +1135,8 @@ class ResizeLatentsInvocation(BaseInvocation):

# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
resized_latents = resized_latents.to("cpu")
torch.cuda.empty_cache()
if device == torch.device("mps"):
mps.empty_cache()

TorchDevice.empty_cache()

name = context.tensors.save(tensor=resized_latents)
return LatentsOutput.build(latents_name=name, latents=resized_latents, seed=self.latents.seed)

@@ -1125,8 +1163,7 @@ class ScaleLatentsInvocation(BaseInvocation):
def invoke(self, context: InvocationContext) -> LatentsOutput:
latents = context.tensors.load(self.latents.latents_name)

# TODO:
device = choose_torch_device()
device = TorchDevice.choose_torch_device()

# resizing
resized_latents = torch.nn.functional.interpolate(

@@ -1138,9 +1175,7 @@ class ScaleLatentsInvocation(BaseInvocation):

# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
resized_latents = resized_latents.to("cpu")
torch.cuda.empty_cache()
if device == torch.device("mps"):
mps.empty_cache()
TorchDevice.empty_cache()

name = context.tensors.save(tensor=resized_latents)
return LatentsOutput.build(latents_name=name, latents=resized_latents, seed=self.latents.seed)

@@ -1272,8 +1307,7 @@ class BlendLatentsInvocation(BaseInvocation):
if latents_a.shape != latents_b.shape:
raise Exception("Latents to blend must be the same size.")

# TODO:
device = choose_torch_device()
device = TorchDevice.choose_torch_device()

def slerp(
t: Union[float, npt.NDArray[Any]], # FIXME: maybe use np.float32 here?

@@ -1326,9 +1360,8 @@ class BlendLatentsInvocation(BaseInvocation):

# https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
blended_latents = blended_latents.to("cpu")
torch.cuda.empty_cache()
if device == torch.device("mps"):
mps.empty_cache()

TorchDevice.empty_cache()

name = context.tensors.save(tensor=blended_latents)
return LatentsOutput.build(latents_name=name, latents=blended_latents)
@@ -1,7 +1,8 @@
import numpy as np
import torch

from invokeai.app.invocations.baseinvocation import BaseInvocation, InvocationContext, invocation
from invokeai.app.invocations.fields import InputField, TensorField, WithMetadata
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, InvocationContext, invocation
from invokeai.app.invocations.fields import ImageField, InputField, TensorField, WithMetadata
from invokeai.app.invocations.primitives import MaskOutput

@@ -34,3 +35,86 @@ class RectangleMaskInvocation(BaseInvocation, WithMetadata):
width=self.width,
height=self.height,
)

@invocation(
"alpha_mask_to_tensor",
title="Alpha Mask to Tensor",
tags=["conditioning"],
category="conditioning",
version="1.0.0",
classification=Classification.Beta,
)
class AlphaMaskToTensorInvocation(BaseInvocation):
"""Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0."""

image: ImageField = InputField(description="The mask image to convert.")
invert: bool = InputField(default=False, description="Whether to invert the mask.")

def invoke(self, context: InvocationContext) -> MaskOutput:
image = context.images.get_pil(self.image.image_name)
mask = torch.zeros((1, image.height, image.width), dtype=torch.bool)
if self.invert:
mask[0] = torch.tensor(np.array(image)[:, :, 3] == 0, dtype=torch.bool)
else:
mask[0] = torch.tensor(np.array(image)[:, :, 3] > 0, dtype=torch.bool)

return MaskOutput(
mask=TensorField(tensor_name=context.tensors.save(mask)),
height=mask.shape[1],
width=mask.shape[2],
)

@invocation(
"invert_tensor_mask",
title="Invert Tensor Mask",
tags=["conditioning"],
category="conditioning",
version="1.0.0",
classification=Classification.Beta,
)
class InvertTensorMaskInvocation(BaseInvocation):
"""Inverts a tensor mask."""

mask: TensorField = InputField(description="The tensor mask to convert.")

def invoke(self, context: InvocationContext) -> MaskOutput:
mask = context.tensors.load(self.mask.tensor_name)
inverted = ~mask

return MaskOutput(
mask=TensorField(tensor_name=context.tensors.save(inverted)),
height=inverted.shape[1],
width=inverted.shape[2],
)

@invocation(
"image_mask_to_tensor",
title="Image Mask to Tensor",
tags=["conditioning"],
category="conditioning",
version="1.0.0",
)
class ImageMaskToTensorInvocation(BaseInvocation, WithMetadata):
"""Convert a mask image to a tensor. Converts the image to grayscale and uses thresholding at the specified value."""

image: ImageField = InputField(description="The mask image to convert.")
cutoff: int = InputField(ge=0, le=255, description="Cutoff (<)", default=128)
invert: bool = InputField(default=False, description="Whether to invert the mask.")

def invoke(self, context: InvocationContext) -> MaskOutput:
image = context.images.get_pil(self.image.image_name, mode="L")

mask = torch.zeros((1, image.height, image.width), dtype=torch.bool)
if self.invert:
mask[0] = torch.tensor(np.array(image)[:, :] >= self.cutoff, dtype=torch.bool)
else:
mask[0] = torch.tensor(np.array(image)[:, :] < self.cutoff, dtype=torch.bool)

return MaskOutput(
mask=TensorField(tensor_name=context.tensors.save(mask)),
height=mask.shape[1],
width=mask.shape[2],
)
@@ -3,7 +3,6 @@ from typing import Any, Literal, Optional, Union
from pydantic import BaseModel, ConfigDict, Field

from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES
from invokeai.app.invocations.fields import (
FieldDescriptions,
ImageField,

@@ -14,6 +13,7 @@ from invokeai.app.invocations.fields import (
)
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_MODE_VALUES, CONTROLNET_RESIZE_VALUES

from ...version import __version__

@@ -36,6 +36,7 @@ class IPAdapterMetadataField(BaseModel):
image: ImageField = Field(description="The IP-Adapter image prompt.")
ip_adapter_model: ModelIdentifierField = Field(description="The IP-Adapter model.")
clip_vision_model: Literal["ViT-H", "ViT-G"] = Field(description="The CLIP Vision model")
method: Literal["full", "style", "composition"] = Field(description="Method to apply IP Weights with")
weight: Union[float, list[float]] = Field(description="The weight given to the IP-Adapter")
begin_step_percent: float = Field(description="When the IP-Adapter is first applied (% of total steps)")
end_step_percent: float = Field(description="When the IP-Adapter is last applied (% of total steps)")
@@ -11,6 +11,7 @@ from invokeai.backend.model_manager.config import AnyModelConfig, BaseModelType,
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
Classification,
invocation,
invocation_output,
)

@@ -93,19 +94,46 @@ class ModelLoaderOutput(UNetOutput, CLIPOutput, VAEOutput):
pass

@invocation_output("model_identifier_output")
class ModelIdentifierOutput(BaseInvocationOutput):
"""Model identifier output"""

model: ModelIdentifierField = OutputField(description="Model identifier", title="Model")

@invocation(
"model_identifier",
title="Model identifier",
tags=["model"],
category="model",
version="1.0.0",
classification=Classification.Prototype,
)
class ModelIdentifierInvocation(BaseInvocation):
"""Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
input for any model, even if the model types don't match. If you connect this to a mismatched input, you'll get an
error."""

model: ModelIdentifierField = InputField(description="The model to select", title="Model")

def invoke(self, context: InvocationContext) -> ModelIdentifierOutput:
if not context.models.exists(self.model.key):
raise Exception(f"Unknown model {self.model.key}")

return ModelIdentifierOutput(model=self.model)

@invocation(
"main_model_loader",
title="Main Model",
tags=["model"],
category="model",
version="1.0.2",
version="1.0.3",
)
class MainModelLoaderInvocation(BaseInvocation):
"""Loads a main model, outputting its submodels."""

model: ModelIdentifierField = InputField(
description=FieldDescriptions.main_model, input=Input.Direct, ui_type=UIType.MainModel
)
model: ModelIdentifierField = InputField(description=FieldDescriptions.main_model, ui_type=UIType.MainModel)
# TODO: precision?

def invoke(self, context: InvocationContext) -> ModelLoaderOutput:

@@ -134,12 +162,12 @@ class LoRALoaderOutput(BaseInvocationOutput):
clip: Optional[CLIPField] = OutputField(default=None, description=FieldDescriptions.clip, title="CLIP")

@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.2")
@invocation("lora_loader", title="LoRA", tags=["model"], category="model", version="1.0.3")
class LoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""

lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(

@@ -190,6 +218,75 @@ class LoRALoaderInvocation(BaseInvocation):
return output

@invocation_output("lora_selector_output")
class LoRASelectorOutput(BaseInvocationOutput):
"""Model loader output"""

lora: LoRAField = OutputField(description="LoRA model and weight", title="LoRA")

@invocation("lora_selector", title="LoRA Selector", tags=["model"], category="model", version="1.0.1")
class LoRASelectorInvocation(BaseInvocation):
"""Selects a LoRA model and weight."""

lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)

def invoke(self, context: InvocationContext) -> LoRASelectorOutput:
return LoRASelectorOutput(lora=LoRAField(lora=self.lora, weight=self.weight))

@invocation("lora_collection_loader", title="LoRA Collection Loader", tags=["model"], category="model", version="1.0.0")
class LoRACollectionLoader(BaseInvocation):
"""Applies a collection of LoRAs to the provided UNet and CLIP models."""

loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
unet: Optional[UNetField] = InputField(
default=None,
description=FieldDescriptions.unet,
input=Input.Connection,
title="UNet",
)
clip: Optional[CLIPField] = InputField(
default=None,
description=FieldDescriptions.clip,
input=Input.Connection,
title="CLIP",
)

def invoke(self, context: InvocationContext) -> LoRALoaderOutput:
output = LoRALoaderOutput()
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []

for lora in loras:
if lora.lora.key in added_loras:
continue

if not context.models.exists(lora.lora.key):
raise Exception(f"Unknown lora: {lora.lora.key}!")

assert lora.lora.base in (BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2)

added_loras.append(lora.lora.key)

if self.unet is not None:
if output.unet is None:
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(lora)

if self.clip is not None:
if output.clip is None:
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(lora)

return output

@invocation_output("sdxl_lora_loader_output")
class SDXLLoRALoaderOutput(BaseInvocationOutput):
"""SDXL LoRA Loader Output"""

@@ -204,13 +301,13 @@ class SDXLLoRALoaderOutput(BaseInvocationOutput):
title="SDXL LoRA",
tags=["lora", "model"],
category="model",
version="1.0.2",
version="1.0.3",
)
class SDXLLoRALoaderInvocation(BaseInvocation):
"""Apply selected lora to unet and text_encoder."""

lora: ModelIdentifierField = InputField(
description=FieldDescriptions.lora_model, input=Input.Direct, title="LoRA", ui_type=UIType.LoRAModel
description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
)
weight: float = InputField(default=0.75, description=FieldDescriptions.lora_weight)
unet: Optional[UNetField] = InputField(

@@ -279,12 +376,78 @@ class SDXLLoRALoaderInvocation(BaseInvocation):
return output

@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.2")
@invocation(
"sdxl_lora_collection_loader",
title="SDXL LoRA Collection Loader",
tags=["model"],
category="model",
version="1.0.0",
)
class SDXLLoRACollectionLoader(BaseInvocation):
"""Applies a collection of SDXL LoRAs to the provided UNet and CLIP models."""

loras: LoRAField | list[LoRAField] = InputField(
description="LoRA models and weights. May be a single LoRA or collection.", title="LoRAs"
)
unet: Optional[UNetField] = InputField(
default=None,
description=FieldDescriptions.unet,
input=Input.Connection,
title="UNet",
)
clip: Optional[CLIPField] = InputField(
default=None,
description=FieldDescriptions.clip,
input=Input.Connection,
title="CLIP",
)
clip2: Optional[CLIPField] = InputField(
default=None,
description=FieldDescriptions.clip,
input=Input.Connection,
title="CLIP 2",
)

def invoke(self, context: InvocationContext) -> SDXLLoRALoaderOutput:
output = SDXLLoRALoaderOutput()
loras = self.loras if isinstance(self.loras, list) else [self.loras]
added_loras: list[str] = []

for lora in loras:
if lora.lora.key in added_loras:
continue

if not context.models.exists(lora.lora.key):
raise Exception(f"Unknown lora: {lora.lora.key}!")

assert lora.lora.base is BaseModelType.StableDiffusionXL

added_loras.append(lora.lora.key)

if self.unet is not None:
if output.unet is None:
output.unet = self.unet.model_copy(deep=True)
output.unet.loras.append(lora)

if self.clip is not None:
if output.clip is None:
output.clip = self.clip.model_copy(deep=True)
output.clip.loras.append(lora)

if self.clip2 is not None:
if output.clip2 is None:
output.clip2 = self.clip2.model_copy(deep=True)
output.clip2.loras.append(lora)

return output

@invocation("vae_loader", title="VAE", tags=["vae", "model"], category="model", version="1.0.3")
class VAELoaderInvocation(BaseInvocation):
"""Loads a VAE model, outputting a VaeLoaderOutput"""

vae_model: ModelIdentifierField = InputField(
description=FieldDescriptions.vae_model, input=Input.Direct, title="VAE", ui_type=UIType.VAEModel
description=FieldDescriptions.vae_model, title="VAE", ui_type=UIType.VAEModel
)

def invoke(self, context: InvocationContext) -> VAEOutput:
@@ -9,7 +9,7 @@ from invokeai.app.invocations.fields import FieldDescriptions, InputField, Laten
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.misc import SEED_MAX

from ...backend.util.devices import choose_torch_device, torch_dtype
from ...backend.util.devices import TorchDevice
from .baseinvocation import (
BaseInvocation,
BaseInvocationOutput,

@@ -46,7 +46,7 @@ def get_noise(
height // downsampling_factor,
width // downsampling_factor,
],
dtype=torch_dtype(device),
dtype=TorchDevice.choose_torch_dtype(device=device),
device=noise_device_type,
generator=generator,
).to("cpu")

@@ -111,14 +111,14 @@ class NoiseInvocation(BaseInvocation):

@field_validator("seed", mode="before")
def modulo_seed(cls, v):
"""Returns the seed modulo (SEED_MAX + 1) to ensure it is within the valid range."""
"""Return the seed modulo (SEED_MAX + 1) to ensure it is within the valid range."""
return v % (SEED_MAX + 1)

def invoke(self, context: InvocationContext) -> NoiseOutput:
noise = get_noise(
width=self.width,
height=self.height,
device=choose_torch_device(),
device=TorchDevice.choose_torch_device(),
seed=self.seed,
use_cpu=self.use_cpu,
)
@@ -1,4 +1,4 @@
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, InputField, OutputField, UIType
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.backend.model_manager import SubModelType

@@ -30,12 +30,12 @@ class SDXLRefinerModelLoaderOutput(BaseInvocationOutput):
vae: VAEField = OutputField(description=FieldDescriptions.vae, title="VAE")

@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.2")
@invocation("sdxl_model_loader", title="SDXL Main Model", tags=["model", "sdxl"], category="model", version="1.0.3")
class SDXLModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl base model, outputting its submodels."""

model: ModelIdentifierField = InputField(
description=FieldDescriptions.sdxl_main_model, input=Input.Direct, ui_type=UIType.SDXLMainModel
description=FieldDescriptions.sdxl_main_model, ui_type=UIType.SDXLMainModel
)
# TODO: precision?

@@ -67,13 +67,13 @@ class SDXLModelLoaderInvocation(BaseInvocation):
title="SDXL Refiner Model",
tags=["model", "sdxl", "refiner"],
category="model",
version="1.0.2",
version="1.0.3",
)
class SDXLRefinerModelLoaderInvocation(BaseInvocation):
"""Loads an sdxl refiner model, outputting its submodels."""

model: ModelIdentifierField = InputField(
description=FieldDescriptions.sdxl_refiner_model, input=Input.Direct, ui_type=UIType.SDXLRefinerModel
description=FieldDescriptions.sdxl_refiner_model, ui_type=UIType.SDXLRefinerModel
)
# TODO: precision?
@@ -8,11 +8,11 @@ from invokeai.app.invocations.baseinvocation import (
invocation,
invocation_output,
)
from invokeai.app.invocations.controlnet_image_processors import CONTROLNET_RESIZE_VALUES
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField, OutputField, UIType
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, OutputField, UIType
from invokeai.app.invocations.model import ModelIdentifierField
from invokeai.app.invocations.util import validate_begin_end_step, validate_weights
from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.controlnet_utils import CONTROLNET_RESIZE_VALUES

class T2IAdapterField(BaseModel):

@@ -45,7 +45,7 @@ class T2IAdapterOutput(BaseInvocationOutput):

@invocation(
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.2"
"t2i_adapter", title="T2I-Adapter", tags=["t2i_adapter", "control"], category="t2i_adapter", version="1.0.3"
)
class T2IAdapterInvocation(BaseInvocation):
"""Collects T2I-Adapter info to pass to other nodes."""

@@ -55,7 +55,6 @@ class T2IAdapterInvocation(BaseInvocation):
t2i_adapter_model: ModelIdentifierField = InputField(
description="The T2I-Adapter model.",
title="T2I-Adapter Model",
input=Input.Direct,
ui_order=-1,
ui_type=UIType.T2IAdapterModel,
)
@@ -4,7 +4,6 @@ from typing import Literal

import cv2
import numpy as np
import torch
from PIL import Image
from pydantic import ConfigDict

@@ -14,7 +13,7 @@ from invokeai.app.services.shared.invocation_context import InvocationContext
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.image_util.realesrgan.realesrgan import RealESRGAN
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.devices import TorchDevice

from .baseinvocation import BaseInvocation, invocation
from .fields import InputField, WithBoard, WithMetadata

@@ -35,9 +34,6 @@ ESRGAN_MODEL_URLS: dict[str, str] = {
"RealESRGAN_x2plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
}

if choose_torch_device() == torch.device("mps"):
from torch import mps

@invocation("esrgan", title="Upscale (RealESRGAN)", tags=["esrgan", "upscale"], category="esrgan", version="1.3.2")
class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard):

@@ -120,9 +116,7 @@ class ESRGANInvocation(BaseInvocation, WithMetadata, WithBoard):
upscaled_image = upscaler.upscale(cv2_image)
pil_image = Image.fromarray(cv2.cvtColor(upscaled_image, cv2.COLOR_BGR2RGB)).convert("RGBA")

torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
TorchDevice.empty_cache()

image_dto = context.images.save(image=pil_image)
@@ -106,9 +106,7 @@ class BulkDownloadService(BulkDownloadBase):
if self._invoker:
assert bulk_download_id is not None
self._invoker.services.events.emit_bulk_download_started(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
bulk_download_id, bulk_download_item_id, bulk_download_item_name
)

def _signal_job_completed(

@@ -118,10 +116,8 @@ class BulkDownloadService(BulkDownloadBase):
if self._invoker:
assert bulk_download_id is not None
assert bulk_download_item_name is not None
self._invoker.services.events.emit_bulk_download_completed(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
self._invoker.services.events.emit_bulk_download_complete(
bulk_download_id, bulk_download_item_id, bulk_download_item_name
)

def _signal_job_failed(

@@ -131,11 +127,8 @@ class BulkDownloadService(BulkDownloadBase):
if self._invoker:
assert bulk_download_id is not None
assert exception is not None
self._invoker.services.events.emit_bulk_download_failed(
bulk_download_id=bulk_download_id,
bulk_download_item_id=bulk_download_item_id,
bulk_download_item_name=bulk_download_item_name,
error=str(exception),
self._invoker.services.events.emit_bulk_download_error(
bulk_download_id, bulk_download_item_id, bulk_download_item_name, str(exception)
)

def stop(self, *args, **kwargs):
@@ -27,12 +27,12 @@ DEFAULT_RAM_CACHE = 10.0
DEFAULT_VRAM_CACHE = 0.25
DEFAULT_CONVERT_CACHE = 20.0
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
PRECISION = Literal["auto", "float16", "bfloat16", "float32", "autocast"]
PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
LOG_FORMAT = Literal["plain", "color", "syslog", "legacy"]
LOG_LEVEL = Literal["debug", "info", "warning", "error", "critical"]
CONFIG_SCHEMA_VERSION = "4.0.0"
CONFIG_SCHEMA_VERSION = "4.0.1"

def get_default_ram_cache_size() -> float:

@@ -105,7 +105,7 @@ class InvokeAIAppConfig(BaseSettings):
lazy_offload: Keep models in VRAM until their space is needed.
log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`, `autocast`
precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
attention_slice_size: Slice size, valid when attention_type=="sliced".<br>Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`

@@ -370,6 +370,9 @@ def migrate_v3_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
# `max_vram_cache_size` was renamed to `vram` some time in v3, but both names were used
if k == "max_vram_cache_size" and "vram" not in category_dict:
parsed_config_dict["vram"] = v
# autocast was removed in v4.0.1
if k == "precision" and v == "autocast":
parsed_config_dict["precision"] = "auto"
if k == "conf_path":
parsed_config_dict["legacy_models_yaml_path"] = v
if k == "legacy_conf_dir":

@@ -392,6 +395,28 @@ def migrate_v3_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
return config

def migrate_v4_0_0_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
"""Migrate v4.0.0 config dictionary to a current config object.

Args:
config_dict: A dictionary of settings from a v4.0.0 config file.

Returns:
An instance of `InvokeAIAppConfig` with the migrated settings.
"""
parsed_config_dict: dict[str, Any] = {}
for k, v in config_dict.items():
# autocast was removed from precision in v4.0.1
if k == "precision" and v == "autocast":
parsed_config_dict["precision"] = "auto"
else:
parsed_config_dict[k] = v
if k == "schema_version":
parsed_config_dict[k] = CONFIG_SCHEMA_VERSION
config = DefaultInvokeAIAppConfig.model_validate(parsed_config_dict)
return config

def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
"""Load and migrate a config file to the latest version.

@@ -418,17 +443,21 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
raise RuntimeError(f"Failed to load and migrate v3 config file {config_path}: {e}") from e
migrated_config.write_file(config_path)
return migrated_config
else:
# Attempt to load as a v4 config file
try:
# Meta is not included in the model fields, so we need to validate it separately
config = InvokeAIAppConfig.model_validate(loaded_config_dict)
assert (
config.schema_version == CONFIG_SCHEMA_VERSION
), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
return config
except Exception as e:
raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e

if loaded_config_dict["schema_version"] == "4.0.0":
loaded_config_dict = migrate_v4_0_0_config_dict(loaded_config_dict)
loaded_config_dict.write_file(config_path)

# Attempt to load as a v4 config file
try:
# Meta is not included in the model fields, so we need to validate it separately
config = InvokeAIAppConfig.model_validate(loaded_config_dict)
assert (
config.schema_version == CONFIG_SCHEMA_VERSION
), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
return config
except Exception as e:
raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e

@lru_cache(maxsize=1)
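As a rough illustration of the new v4.0.0 migration path above (the `host` key and its value are hypothetical, not from the diff): a config carrying `precision: autocast` is rewritten to `precision: auto` and its schema version bumped, roughly like this:

    # illustrative sketch only, not InvokeAI code
    v4_0_0 = {"schema_version": "4.0.0", "precision": "autocast", "host": "127.0.0.1"}
    migrated = {k: ("auto" if k == "precision" and v == "autocast" else v) for k, v in v4_0_0.items()}
    migrated["schema_version"] = "4.0.1"
    # migrated == {"schema_version": "4.0.1", "precision": "auto", "host": "127.0.0.1"}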
@@ -8,14 +8,13 @@ import time
import traceback
from pathlib import Path
from queue import Empty, PriorityQueue
from typing import Any, Dict, List, Optional, Set
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set

import requests
from pydantic.networks import AnyHttpUrl
from requests import HTTPError
from tqdm import tqdm

from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.util.misc import get_iso_timestamp
from invokeai.backend.util.logging import InvokeAILogger

@@ -30,6 +29,9 @@ from .download_base import (
UnknownJobIDException,
)

if TYPE_CHECKING:
from invokeai.app.services.events.events_base import EventServiceBase

# Maximum number of bytes to download during each call to requests.iter_content()
DOWNLOAD_CHUNK_SIZE = 100000

@@ -40,7 +42,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
def __init__(
self,
max_parallel_dl: int = 5,
event_bus: Optional[EventServiceBase] = None,
event_bus: Optional["EventServiceBase"] = None,
requests_session: Optional[requests.sessions.Session] = None,
):
"""

@@ -270,7 +272,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
job.dest.parent.mkdir(parents=True, exist_ok=True)
job.download_path = job.dest

assert job.download_path is not None
assert job.download_path

# Don't clobber an existing file. See commit 82c2c85202f88c6d24ff84710f297cfc6ae174af
# for code that instead resumes an interrupted download.

@@ -280,9 +282,6 @@ class DownloadQueueService(DownloadQueueServiceBase):
# append ".downloading" to the path
in_progress_path = self._in_progress_path(job.download_path)

# catch rare race condition that is appearing in unit tests.
assert in_progress_path.parent.exists(), f"Directory doesn't exist! in_progress_path={in_progress_path}; parent={in_progress_path.parent}"

# signal caller that the download is starting. At this point, key fields such as
# download_path and total_bytes will be populated. We call it here because the might
# discover that the local file is already complete and generate a COMPLETED status.

@@ -321,10 +320,8 @@ class DownloadQueueService(DownloadQueueServiceBase):
in_progress_path.rename(job.download_path)

def _validate_filename(self, directory: str, filename: str) -> bool:
pc_name_max = os.pathconf(directory, "PC_NAME_MAX") if hasattr(os, "pathconf") else 260  # hardcoded for windows
pc_path_max = (
os.pathconf(directory, "PC_PATH_MAX") if hasattr(os, "pathconf") else 32767
)  # hardcoded for windows with long names enabled
pc_name_max = get_pc_name_max(directory)
pc_path_max = get_pc_path_max(directory)
if "/" in filename:
return False
if filename.startswith(".."):

@@ -348,8 +345,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
f"An error occurred while processing the on_start callback: {traceback.format_exception(e)}"
)
if self._event_bus:
assert job.download_path
self._event_bus.emit_download_started(str(job.source), job.download_path.as_posix())
self._event_bus.emit_download_started(job)

def _signal_job_progress(self, job: DownloadJob) -> None:
if job.on_progress:

@@ -360,13 +356,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
f"An error occurred while processing the on_progress callback: {traceback.format_exception(e)}"
)
if self._event_bus:
assert job.download_path
self._event_bus.emit_download_progress(
str(job.source),
download_path=job.download_path.as_posix(),
current_bytes=job.bytes,
total_bytes=job.total_bytes,
)
self._event_bus.emit_download_progress(job)

def _signal_job_complete(self, job: DownloadJob) -> None:
job.status = DownloadJobStatus.COMPLETED

@@ -378,10 +368,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
f"An error occurred while processing the on_complete callback: {traceback.format_exception(e)}"
)
if self._event_bus:
assert job.download_path
self._event_bus.emit_download_complete(
str(job.source), download_path=job.download_path.as_posix(), total_bytes=job.total_bytes
)
self._event_bus.emit_download_complete(job)

def _signal_job_cancelled(self, job: DownloadJob) -> None:
if job.status not in [DownloadJobStatus.RUNNING, DownloadJobStatus.WAITING]:

@@ -395,7 +382,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
f"An error occurred while processing the on_cancelled callback: {traceback.format_exception(e)}"
)
if self._event_bus:
self._event_bus.emit_download_cancelled(str(job.source))
self._event_bus.emit_download_cancelled(job)

def _signal_job_error(self, job: DownloadJob, excp: Optional[Exception] = None) -> None:
job.status = DownloadJobStatus.ERROR

@@ -408,9 +395,7 @@ class DownloadQueueService(DownloadQueueServiceBase):
f"An error occurred while processing the on_error callback: {traceback.format_exception(e)}"
)
if self._event_bus:
assert job.error_type
assert job.error
self._event_bus.emit_download_error(str(job.source), error_type=job.error_type, error=job.error)
self._event_bus.emit_download_error(job)

def _cleanup_cancelled_job(self, job: DownloadJob) -> None:
self._logger.debug(f"Cleaning up leftover files from cancelled download job {job.download_path}")

@@ -422,6 +407,26 @@ class DownloadQueueService(DownloadQueueServiceBase):
self._logger.warning(excp)

def get_pc_name_max(directory: str) -> int:
if hasattr(os, "pathconf"):
try:
return os.pathconf(directory, "PC_NAME_MAX")
except OSError:
# macOS w/ external drives raise OSError
pass
return 260  # hardcoded for windows

def get_pc_path_max(directory: str) -> int:
if hasattr(os, "pathconf"):
try:
return os.pathconf(directory, "PC_PATH_MAX")
except OSError:
# some platforms may not have this value
pass
return 32767  # hardcoded for windows with long names enabled

# Example on_progress event handler to display a TQDM status bar
# Activate with:
#   download_service.download(DownloadJob('http://foo.bar/baz', '/tmp', on_progress=TqdmProgress().update))
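The pathconf fallback introduced above can be sketched as one standalone helper; this uses only the standard library, and the 260/32767 fallbacks simply mirror the Windows limits quoted in the hunk:

    import os

    def pathconf_or(directory: str, name: str, fallback: int) -> int:
        # os.pathconf is POSIX-only and can raise OSError (e.g. macOS external drives),
        # so fall back to a hardcoded limit in either case.
        if hasattr(os, "pathconf"):
            try:
                return os.pathconf(directory, name)
            except OSError:
                pass
        return fallback

    # e.g. pathconf_or("/tmp", "PC_NAME_MAX", 260); pathconf_or("/tmp", "PC_PATH_MAX", 32767)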
@@ -1,486 +1,253 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Any, Dict, List, Optional, Union
from typing import TYPE_CHECKING, Optional

from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.session_queue.session_queue_common import (
BatchStatus,
EnqueueBatchResult,
SessionQueueItem,
SessionQueueStatus,
from invokeai.app.services.events.events_common import (
BatchEnqueuedEvent,
BulkDownloadCompleteEvent,
BulkDownloadErrorEvent,
BulkDownloadStartedEvent,
DownloadCancelledEvent,
DownloadCompleteEvent,
DownloadErrorEvent,
DownloadProgressEvent,
DownloadStartedEvent,
EventBase,
ExtraData,
InvocationCompleteEvent,
InvocationDenoiseProgressEvent,
InvocationErrorEvent,
InvocationStartedEvent,
ModelInstallCancelledEvent,
ModelInstallCompleteEvent,
ModelInstallDownloadProgressEvent,
ModelInstallDownloadsCompleteEvent,
ModelInstallErrorEvent,
ModelInstallStartedEvent,
ModelLoadCompleteEvent,
ModelLoadStartedEvent,
QueueClearedEvent,
QueueItemStatusChangedEvent,
SessionCanceledEvent,
SessionCompleteEvent,
SessionStartedEvent,
)
from invokeai.app.util.misc import get_timestamp
from invokeai.backend.model_manager import AnyModelConfig
from invokeai.backend.model_manager.config import SubModelType
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState

if TYPE_CHECKING:
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
from invokeai.app.services.download.download_base import DownloadJob
from invokeai.app.services.events.events_common import EventBase
from invokeai.app.services.model_install.model_install_common import ModelInstallJob
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
from invokeai.app.services.session_queue.session_queue_common import (
BatchStatus,
EnqueueBatchResult,
SessionQueueItem,
SessionQueueStatus,
)
from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType

class EventServiceBase:
queue_event: str = "queue_event"
bulk_download_event: str = "bulk_download_event"
download_event: str = "download_event"
model_event: str = "model_event"

"""Basic event bus, to have an empty stand-in when not needed"""

def dispatch(self, event_name: str, payload: Any) -> None:
def dispatch(self, event: "EventBase") -> None:
pass

def _emit_bulk_download_event(self, event_name: str, payload: dict) -> None:
"""Bulk download events are emitted to a room with queue_id as the room name"""
payload["timestamp"] = get_timestamp()
self.dispatch(
event_name=EventServiceBase.bulk_download_event,
payload={"event": event_name, "data": payload},
)
# region: Invocation

def __emit_queue_event(self, event_name: str, payload: dict) -> None:
"""Queue events are emitted to a room with queue_id as the room name"""
payload["timestamp"] = get_timestamp()
self.dispatch(
event_name=EventServiceBase.queue_event,
payload={"event": event_name, "data": payload},
)

def __emit_download_event(self, event_name: str, payload: dict) -> None:
payload["timestamp"] = get_timestamp()
self.dispatch(
event_name=EventServiceBase.download_event,
payload={"event": event_name, "data": payload},
)

def __emit_model_event(self, event_name: str, payload: dict) -> None:
payload["timestamp"] = get_timestamp()
self.dispatch(
event_name=EventServiceBase.model_event,
payload={"event": event_name, "data": payload},
)

# Define events here for every event in the system.
# This will make them easier to integrate until we find a schema generator.
def emit_generator_progress(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
node_id: str,
source_node_id: str,
progress_image: Optional[ProgressImage],
step: int,
order: int,
total_steps: int,
def emit_invocation_started(
self, queue_item: "SessionQueueItem", invocation: "BaseInvocation", extra: Optional[ExtraData] = None
) -> None:
"""Emitted when there is generation progress"""
self.__emit_queue_event(
event_name="generator_progress",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node_id": node_id,
"source_node_id": source_node_id,
"progress_image": progress_image.model_dump(mode="json") if progress_image is not None else None,
"step": step,
"order": order,
"total_steps": total_steps,
},
"""Emitted when an invocation is started"""
self.dispatch(InvocationStartedEvent.build(queue_item, invocation, extra))

def emit_invocation_denoise_progress(
self,
queue_item: "SessionQueueItem",
invocation: "BaseInvocation",
intermediate_state: PipelineIntermediateState,
progress_image: "ProgressImage",
extra: Optional[ExtraData] = None,
) -> None:
"""Emitted at each step during denoising of an invocation."""
self.dispatch(
InvocationDenoiseProgressEvent.build(queue_item, invocation, intermediate_state, progress_image, extra)
)

def emit_invocation_complete(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
result: dict,
node: dict,
source_node_id: str,
queue_item: "SessionQueueItem",
invocation: "BaseInvocation",
output: "BaseInvocationOutput",
extra: Optional[ExtraData] = None,
) -> None:
"""Emitted when an invocation has completed"""
self.__emit_queue_event(
event_name="invocation_complete",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node": node,
"source_node_id": source_node_id,
"result": result,
},
)
"""Emitted when an invocation is complete"""
self.dispatch(InvocationCompleteEvent.build(queue_item, invocation, output, extra))

def emit_invocation_error(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
node: dict,
source_node_id: str,
queue_item: "SessionQueueItem",
invocation: "BaseInvocation",
error_type: str,
error: str,
extra: Optional[ExtraData] = None,
) -> None:
"""Emitted when an invocation has completed"""
self.__emit_queue_event(
event_name="invocation_error",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node": node,
"source_node_id": source_node_id,
"error_type": error_type,
"error": error,
},
)
"""Emitted when an invocation encounters an error"""
self.dispatch(InvocationErrorEvent.build(queue_item, invocation, error_type, error, extra))

def emit_invocation_started(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
node: dict,
source_node_id: str,
) -> None:
"""Emitted when an invocation has started"""
self.__emit_queue_event(
event_name="invocation_started",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"node": node,
"source_node_id": source_node_id,
},
)
# endregion

def emit_graph_execution_complete(
self, queue_id: str, queue_item_id: int, queue_batch_id: str, graph_execution_state_id: str
) -> None:
# region Session

def emit_session_started(self, queue_item: "SessionQueueItem", extra: Optional[ExtraData] = None) -> None:
"""Emitted when a session has started"""
self.dispatch(SessionStartedEvent.build(queue_item, extra))

def emit_session_complete(self, queue_item: "SessionQueueItem", extra: Optional[ExtraData] = None) -> None:
"""Emitted when a session has completed all invocations"""
self.__emit_queue_event(
event_name="graph_execution_state_complete",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
},
)
self.dispatch(SessionCompleteEvent.build(queue_item, extra))

def emit_model_load_started(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
model_config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> None:
"""Emitted when a model is requested"""
self.__emit_queue_event(
event_name="model_load_started",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"model_config": model_config.model_dump(mode="json"),
"submodel_type": submodel_type,
},
)

def emit_model_load_completed(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
model_config: AnyModelConfig,
submodel_type: Optional[SubModelType] = None,
) -> None:
"""Emitted when a model is correctly loaded (returns model info)"""
self.__emit_queue_event(
event_name="model_load_completed",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
"model_config": model_config.model_dump(mode="json"),
"submodel_type": submodel_type,
},
)

def emit_session_canceled(
self,
queue_id: str,
queue_item_id: int,
queue_batch_id: str,
graph_execution_state_id: str,
) -> None:
def emit_session_canceled(self, queue_item: "SessionQueueItem", extra: Optional[ExtraData] = None) -> None:
"""Emitted when a session is canceled"""
self.__emit_queue_event(
event_name="session_canceled",
payload={
"queue_id": queue_id,
"queue_item_id": queue_item_id,
"queue_batch_id": queue_batch_id,
"graph_execution_state_id": graph_execution_state_id,
},
)
self.dispatch(SessionCanceledEvent.build(queue_item, extra))

# endregion

# region Queue

def emit_queue_item_status_changed(
self,
session_queue_item: SessionQueueItem,
batch_status: BatchStatus,
queue_status: SessionQueueStatus,
queue_item: "SessionQueueItem",
batch_status: "BatchStatus",
queue_status: "SessionQueueStatus",
extra: Optional[ExtraData] = None,
) -> None:
"""Emitted when a queue item's status changes"""
self.__emit_queue_event(
event_name="queue_item_status_changed",
payload={
"queue_id": queue_status.queue_id,
"queue_item": {
"queue_id": session_queue_item.queue_id,
"item_id": session_queue_item.item_id,
"status": session_queue_item.status,
"batch_id": session_queue_item.batch_id,
"session_id": session_queue_item.session_id,
"error": session_queue_item.error,
"created_at": str(session_queue_item.created_at) if session_queue_item.created_at else None,
"updated_at": str(session_queue_item.updated_at) if session_queue_item.updated_at else None,
"started_at": str(session_queue_item.started_at) if session_queue_item.started_at else None,
|
||||
"completed_at": str(session_queue_item.completed_at) if session_queue_item.completed_at else None,
|
||||
},
|
||||
"batch_status": batch_status.model_dump(mode="json"),
|
||||
"queue_status": queue_status.model_dump(mode="json"),
|
||||
},
|
||||
)
|
||||
self.dispatch(QueueItemStatusChangedEvent.build(queue_item, batch_status, queue_status, extra))
|
||||
|
||||
def emit_batch_enqueued(self, enqueue_result: EnqueueBatchResult) -> None:
|
||||
def emit_batch_enqueued(self, enqueue_result: "EnqueueBatchResult", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when a batch is enqueued"""
|
||||
self.__emit_queue_event(
|
||||
event_name="batch_enqueued",
|
||||
payload={
|
||||
"queue_id": enqueue_result.queue_id,
|
||||
"batch_id": enqueue_result.batch.batch_id,
|
||||
"enqueued": enqueue_result.enqueued,
|
||||
},
|
||||
)
|
||||
self.dispatch(BatchEnqueuedEvent.build(enqueue_result, extra))
|
||||
|
||||
def emit_queue_cleared(self, queue_id: str) -> None:
|
||||
"""Emitted when the queue is cleared"""
|
||||
self.__emit_queue_event(
|
||||
event_name="queue_cleared",
|
||||
payload={"queue_id": queue_id},
|
||||
)
|
||||
def emit_queue_cleared(self, queue_id: str, extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when a queue is cleared"""
|
||||
self.dispatch(QueueClearedEvent.build(queue_id, extra))
|
||||
|
||||
def emit_download_started(self, source: str, download_path: str) -> None:
|
||||
"""
|
||||
Emit when a download job is started.
|
||||
# endregion
|
||||
|
||||
:param source: The source URL of the download
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_started",
|
||||
payload={"source": source, "download_path": download_path},
|
||||
)
|
||||
# region Download
|
||||
|
||||
def emit_download_progress(self, source: str, download_path: str, current_bytes: int, total_bytes: int) -> None:
|
||||
"""
|
||||
Emit "download_progress" events at regular intervals during a download job.
|
||||
def emit_download_started(self, job: "DownloadJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when a download is started"""
|
||||
self.dispatch(DownloadStartedEvent.build(job, extra))
|
||||
|
||||
:param source: The downloaded source
|
||||
:param download_path: The local downloaded file
|
||||
:param current_bytes: Number of bytes downloaded so far
|
||||
:param total_bytes: The size of the file being downloaded (if known)
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_progress",
|
||||
payload={
|
||||
"source": source,
|
||||
"download_path": download_path,
|
||||
"current_bytes": current_bytes,
|
||||
"total_bytes": total_bytes,
|
||||
},
|
||||
)
|
||||
def emit_download_progress(self, job: "DownloadJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted at intervals during a download"""
|
||||
self.dispatch(DownloadProgressEvent.build(job, extra))
|
||||
|
||||
def emit_download_complete(self, source: str, download_path: str, total_bytes: int) -> None:
|
||||
"""
|
||||
Emit a "download_complete" event at the end of a successful download.
|
||||
def emit_download_complete(self, job: "DownloadJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when a download is completed"""
|
||||
self.dispatch(DownloadCompleteEvent.build(job, extra))
|
||||
|
||||
:param source: Source URL
|
||||
:param download_path: Path to the locally downloaded file
|
||||
:param total_bytes: The size of the downloaded file
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_complete",
|
||||
payload={
|
||||
"source": source,
|
||||
"download_path": download_path,
|
||||
"total_bytes": total_bytes,
|
||||
},
|
||||
)
|
||||
def emit_download_cancelled(self, job: "DownloadJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when a download is cancelled"""
|
||||
self.dispatch(DownloadCancelledEvent.build(job, extra))
|
||||
|
||||
def emit_download_cancelled(self, source: str) -> None:
|
||||
"""Emit a "download_cancelled" event in the event that the download was cancelled by user."""
|
||||
self.__emit_download_event(
|
||||
event_name="download_cancelled",
|
||||
payload={
|
||||
"source": source,
|
||||
},
|
||||
)
|
||||
def emit_download_error(self, job: "DownloadJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when a download encounters an error"""
|
||||
self.dispatch(DownloadErrorEvent.build(job, extra))
|
||||
|
||||
def emit_download_error(self, source: str, error_type: str, error: str) -> None:
|
||||
"""
|
||||
Emit a "download_error" event when an download job encounters an exception.
|
||||
# endregion
|
||||
|
||||
:param source: Source URL
|
||||
:param error_type: The name of the exception that raised the error
|
||||
:param error: The traceback from this error
|
||||
"""
|
||||
self.__emit_download_event(
|
||||
event_name="download_error",
|
||||
payload={
|
||||
"source": source,
|
||||
"error_type": error_type,
|
||||
"error": error,
|
||||
},
|
||||
)
|
||||
# region Model loading
|
||||
|
||||
def emit_model_install_downloading(
|
||||
def emit_model_load_started(
|
||||
self,
|
||||
source: str,
|
||||
local_path: str,
|
||||
bytes: int,
|
||||
total_bytes: int,
|
||||
parts: List[Dict[str, Union[str, int]]],
|
||||
id: int,
|
||||
config: "AnyModelConfig",
|
||||
submodel_type: Optional["SubModelType"] = None,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Emit at intervals while the install job is in progress (remote models only).
|
||||
"""Emitted when a model load is started."""
|
||||
self.dispatch(ModelLoadStartedEvent.build(config, submodel_type, extra))
|
||||
|
||||
:param source: Source of the model
|
||||
:param local_path: Where model is downloading to
|
||||
:param parts: Progress of downloading URLs that comprise the model, if any.
|
||||
:param bytes: Number of bytes downloaded so far.
|
||||
:param total_bytes: Total size of download, including all files.
|
||||
This emits a Dict with keys "source", "local_path", "bytes" and "total_bytes".
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_downloading",
|
||||
payload={
|
||||
"source": source,
|
||||
"local_path": local_path,
|
||||
"bytes": bytes,
|
||||
"total_bytes": total_bytes,
|
||||
"parts": parts,
|
||||
"id": id,
|
||||
},
|
||||
)
|
||||
def emit_model_load_complete(
|
||||
self,
|
||||
config: "AnyModelConfig",
|
||||
submodel_type: Optional["SubModelType"] = None,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> None:
|
||||
"""Emitted when a model load is complete."""
|
||||
self.dispatch(ModelLoadCompleteEvent.build(config, submodel_type, extra))
|
||||
|
||||
def emit_model_install_downloads_done(self, source: str) -> None:
|
||||
"""
|
||||
Emit once when all parts are downloaded, but before the probing and registration start.
|
||||
# endregion
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_downloads_done",
|
||||
payload={"source": source},
|
||||
)
|
||||
# region Model install
|
||||
|
||||
def emit_model_install_running(self, source: str) -> None:
|
||||
"""
|
||||
Emit once when an install job becomes active.
|
||||
def emit_model_install_download_progress(self, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted at intervals while the install job is in progress (remote models only)."""
|
||||
self.dispatch(ModelInstallDownloadProgressEvent.build(job, extra))
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_running",
|
||||
payload={"source": source},
|
||||
)
|
||||
def emit_model_install_downloads_complete(self, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> None:
|
||||
self.dispatch(ModelInstallDownloadsCompleteEvent.build(job, extra))
|
||||
|
||||
def emit_model_install_completed(self, source: str, key: str, id: int, total_bytes: Optional[int] = None) -> None:
|
||||
"""
|
||||
Emit when an install job is completed successfully.
|
||||
def emit_model_install_started(self, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted once when an install job is started (after any download)."""
|
||||
self.dispatch(ModelInstallStartedEvent.build(job, extra))
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
:param key: Model config record key
|
||||
:param total_bytes: Size of the model (may be None for installation of a local path)
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_completed",
|
||||
payload={"source": source, "total_bytes": total_bytes, "key": key, "id": id},
|
||||
)
|
||||
def emit_model_install_complete(self, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when an install job is completed successfully."""
|
||||
self.dispatch(ModelInstallCompleteEvent.build(job, extra))
|
||||
|
||||
def emit_model_install_cancelled(self, source: str, id: int) -> None:
|
||||
"""
|
||||
Emit when an install job is cancelled.
|
||||
def emit_model_install_cancelled(self, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when an install job is cancelled."""
|
||||
self.dispatch(ModelInstallCancelledEvent.build(job, extra))
|
||||
|
||||
:param source: Source of the model; local path, repo_id or url
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_cancelled",
|
||||
payload={"source": source, "id": id},
|
||||
)
|
||||
def emit_model_install_error(self, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> None:
|
||||
"""Emitted when an install job encounters an exception."""
|
||||
self.dispatch(ModelInstallErrorEvent.build(job, extra))
|
||||
|
||||
def emit_model_install_error(self, source: str, error_type: str, error: str, id: int) -> None:
|
||||
"""
|
||||
Emit when an install job encounters an exception.
|
||||
# endregion
|
||||
|
||||
:param source: Source of the model
|
||||
:param error_type: The name of the exception
|
||||
:param error: A text description of the exception
|
||||
"""
|
||||
self.__emit_model_event(
|
||||
event_name="model_install_error",
|
||||
payload={"source": source, "error_type": error_type, "error": error, "id": id},
|
||||
)
|
||||
# region Bulk image download
|
||||
|
||||
def emit_bulk_download_started(
|
||||
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
|
||||
self,
|
||||
bulk_download_id: str,
|
||||
bulk_download_item_id: str,
|
||||
bulk_download_item_name: str,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> None:
|
||||
"""Emitted when a bulk download starts"""
|
||||
self._emit_bulk_download_event(
|
||||
event_name="bulk_download_started",
|
||||
payload={
|
||||
"bulk_download_id": bulk_download_id,
|
||||
"bulk_download_item_id": bulk_download_item_id,
|
||||
"bulk_download_item_name": bulk_download_item_name,
|
||||
},
|
||||
"""Emitted when a bulk image download is started"""
|
||||
self.dispatch(
|
||||
BulkDownloadStartedEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name, extra)
|
||||
)
|
||||
|
||||
def emit_bulk_download_completed(
|
||||
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str
|
||||
def emit_bulk_download_complete(
|
||||
self,
|
||||
bulk_download_id: str,
|
||||
bulk_download_item_id: str,
|
||||
bulk_download_item_name: str,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> None:
|
||||
"""Emitted when a bulk download completes"""
|
||||
self._emit_bulk_download_event(
|
||||
event_name="bulk_download_completed",
|
||||
payload={
|
||||
"bulk_download_id": bulk_download_id,
|
||||
"bulk_download_item_id": bulk_download_item_id,
|
||||
"bulk_download_item_name": bulk_download_item_name,
|
||||
},
|
||||
"""Emitted when a bulk image download is complete"""
|
||||
self.dispatch(
|
||||
BulkDownloadCompleteEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name, extra)
|
||||
)
|
||||
|
||||
def emit_bulk_download_failed(
|
||||
self, bulk_download_id: str, bulk_download_item_id: str, bulk_download_item_name: str, error: str
|
||||
def emit_bulk_download_error(
|
||||
self,
|
||||
bulk_download_id: str,
|
||||
bulk_download_item_id: str,
|
||||
bulk_download_item_name: str,
|
||||
error: str,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> None:
|
||||
"""Emitted when a bulk download fails"""
|
||||
self._emit_bulk_download_event(
|
||||
event_name="bulk_download_failed",
|
||||
payload={
|
||||
"bulk_download_id": bulk_download_id,
|
||||
"bulk_download_item_id": bulk_download_item_id,
|
||||
"bulk_download_item_name": bulk_download_item_name,
|
||||
"error": error,
|
||||
},
|
||||
"""Emitted when a bulk image download has an error"""
|
||||
self.dispatch(
|
||||
BulkDownloadErrorEvent.build(bulk_download_id, bulk_download_item_id, bulk_download_item_name, error, extra)
|
||||
)
|
||||
|
||||
# endregion
|
||||
|
invokeai/app/services/events/events_common.py (new file, 707 lines)
@@ -0,0 +1,707 @@
|
||||
from math import floor
|
||||
from typing import TYPE_CHECKING, Any, Coroutine, Optional, Protocol, TypeAlias, TypeVar
|
||||
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from pydantic import BaseModel, ConfigDict, Field, SerializeAsAny
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput
|
||||
from invokeai.app.services.session_processor.session_processor_common import ProgressImage
|
||||
from invokeai.app.services.session_queue.session_queue_common import (
|
||||
QUEUE_ITEM_STATUS,
|
||||
BatchStatus,
|
||||
EnqueueBatchResult,
|
||||
SessionQueueItem,
|
||||
SessionQueueStatus,
|
||||
)
|
||||
from invokeai.app.util.misc import get_timestamp
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig, SubModelType
|
||||
from invokeai.backend.stable_diffusion.diffusers_pipeline import PipelineIntermediateState
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from invokeai.app.services.download.download_base import DownloadJob
|
||||
from invokeai.app.services.model_install.model_install_common import ModelInstallJob
|
||||
|
||||
|
||||
ExtraData: TypeAlias = dict[str, Any]
|
||||
|
||||
|
||||
class EventBase(BaseModel):
|
||||
"""Base class for all events. All events must inherit from this class.
|
||||
|
||||
Events must define a class attribute `__event_name__` to identify the event.
|
||||
|
||||
All other attributes should be defined as normal for a pydantic model.
|
||||
|
||||
A timestamp is automatically added to the event when it is created.
|
||||
"""
|
||||
|
||||
timestamp: int = Field(description="The timestamp of the event", default_factory=get_timestamp)
|
||||
extra: Optional[ExtraData] = Field(default=None, description="Extra data to include with the event")
|
||||
|
||||
model_config = ConfigDict(json_schema_serialization_defaults_required=True)
|
||||
|
||||
@classmethod
|
||||
def get_events(cls) -> set[type["EventBase"]]:
|
||||
"""Get a set of all event models."""
|
||||
|
||||
event_subclasses: set[type["EventBase"]] = set()
|
||||
for subclass in cls.__subclasses__():
|
||||
# We only want to include subclasses that are event models, not intermediary classes
|
||||
if hasattr(subclass, "__event_name__"):
|
||||
event_subclasses.add(subclass)
|
||||
event_subclasses.update(subclass.get_events())
|
||||
|
||||
return event_subclasses
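As an illustration of the convention described in the docstring above, a hypothetical subclass could look like the sketch below (the FooHappenedEvent name and its field are invented for this example and are not part of the module):

class FooHappenedEvent(EventBase):
    """Hypothetical event model for foo_happened (illustration only)"""

    __event_name__ = "foo_happened"

    foo_id: str = Field(description="The ID of the foo that happened")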
|
||||
|
||||
|
||||
TEvent = TypeVar("TEvent", bound=EventBase)
|
||||
|
||||
FastAPIEvent: TypeAlias = tuple[str, TEvent]
|
||||
"""
|
||||
A tuple representing a `fastapi-events` event, with the event name and payload.
|
||||
Provide a generic type to `TEvent` to specify the payload type.
|
||||
"""
|
||||
|
||||
|
||||
class FastAPIEventFunc(Protocol):
|
||||
def __call__(self, event: FastAPIEvent[Any]) -> Optional[Coroutine[Any, Any, None]]: ...
|
||||
|
||||
|
||||
def register_events(events: set[type[TEvent]], func: FastAPIEventFunc) -> None:
|
||||
"""Register a function to handle a list of events.
|
||||
|
||||
:param events: A list of event classes to handle
|
||||
:param func: The function to handle the events
|
||||
"""
|
||||
for event in events:
|
||||
assert hasattr(event, "__event_name__")
|
||||
local_handler.register(event_name=event.__event_name__, _func=func) # pyright: ignore [reportUnknownMemberType, reportUnknownArgumentType, reportAttributeAccessIssue]
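As a usage sketch (the handler name is an assumption; QueueClearedEvent is defined later in this file), a listener for queue-cleared events might be wired up like so:

async def on_queue_cleared(event: FastAPIEvent[QueueClearedEvent]) -> None:
    # fastapi-events delivers a (name, payload) tuple; the payload is the event model
    _name, payload = event
    print(f"queue {payload.queue_id} cleared at {payload.timestamp}")

register_events({QueueClearedEvent}, on_queue_cleared)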
|
||||
|
||||
|
||||
class QueueEventBase(EventBase):
|
||||
"""Base class for queue events"""
|
||||
|
||||
queue_id: str = Field(description="The ID of the queue")
|
||||
|
||||
|
||||
class QueueItemEventBase(QueueEventBase):
|
||||
"""Base class for queue item events"""
|
||||
|
||||
item_id: int = Field(description="The ID of the queue item")
|
||||
batch_id: str = Field(description="The ID of the queue batch")
|
||||
|
||||
|
||||
class SessionEventBase(QueueItemEventBase):
|
||||
"""Base class for session (aka graph execution state) events"""
|
||||
|
||||
session_id: str = Field(description="The ID of the session (aka graph execution state)")
|
||||
|
||||
|
||||
class InvocationEventBase(SessionEventBase):
|
||||
"""Base class for invocation events"""
|
||||
|
||||
queue_id: str = Field(description="The ID of the queue")
|
||||
item_id: int = Field(description="The ID of the queue item")
|
||||
batch_id: str = Field(description="The ID of the queue batch")
|
||||
session_id: str = Field(description="The ID of the session (aka graph execution state)")
|
||||
invocation_id: str = Field(description="The ID of the invocation")
|
||||
invocation_source_id: str = Field(description="The ID of the prepared invocation's source node")
|
||||
invocation_type: str = Field(description="The type of invocation")
|
||||
|
||||
|
||||
class InvocationStartedEvent(InvocationEventBase):
|
||||
"""Event model for invocation_started"""
|
||||
|
||||
__event_name__ = "invocation_started"
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls, queue_item: SessionQueueItem, invocation: BaseInvocation, extra: Optional[ExtraData] = None
|
||||
) -> "InvocationStartedEvent":
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
session_id=queue_item.session_id,
|
||||
invocation_id=invocation.id,
|
||||
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
invocation_type=invocation.get_type(),
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class InvocationDenoiseProgressEvent(InvocationEventBase):
|
||||
"""Event model for invocation_denoise_progress"""
|
||||
|
||||
__event_name__ = "invocation_denoise_progress"
|
||||
|
||||
progress_image: ProgressImage = Field(description="The progress image sent at each step during processing")
|
||||
step: int = Field(description="The current step of the invocation")
|
||||
total_steps: int = Field(description="The total number of steps in the invocation")
|
||||
order: int = Field(description="The order of the invocation in the session")
|
||||
percentage: float = Field(description="The percentage of completion of the invocation")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls,
|
||||
queue_item: SessionQueueItem,
|
||||
invocation: BaseInvocation,
|
||||
intermediate_state: PipelineIntermediateState,
|
||||
progress_image: ProgressImage,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> "InvocationDenoiseProgressEvent":
|
||||
step = intermediate_state.step
|
||||
total_steps = intermediate_state.total_steps
|
||||
order = intermediate_state.order
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
session_id=queue_item.session_id,
|
||||
invocation_id=invocation.id,
|
||||
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
invocation_type=invocation.get_type(),
|
||||
progress_image=progress_image,
|
||||
step=step,
|
||||
total_steps=total_steps,
|
||||
order=order,
|
||||
percentage=cls.calc_percentage(step, total_steps, order),
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def calc_percentage(step: int, total_steps: int, scheduler_order: float) -> float:
|
||||
"""Calculate the percentage of completion of denoising."""
|
||||
if total_steps == 0:
|
||||
return 0.0
|
||||
if scheduler_order == 2:
|
||||
return floor((step + 1 + 1) / 2) / floor((total_steps + 1) / 2)
|
||||
# order == 1
|
||||
return (step + 1 + 1) / (total_steps + 1)
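As a quick worked example with made-up numbers: for a first-order scheduler with 20 total steps, step index 9 gives (9 + 2) / (20 + 1) ≈ 0.52, while a second-order scheduler effectively halves both counts before dividing:

from math import floor  # already imported at the top of this module

# illustration only: assumed step counts, not taken from a real run
assert InvocationDenoiseProgressEvent.calc_percentage(9, 20, 1) == 11 / 21
assert InvocationDenoiseProgressEvent.calc_percentage(9, 20, 2) == floor(11 / 2) / floor(21 / 2)  # == 0.5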
|
||||
|
||||
|
||||
class InvocationCompleteEvent(InvocationEventBase):
|
||||
"""Event model for invocation_complete"""
|
||||
|
||||
__event_name__ = "invocation_complete"
|
||||
|
||||
result: SerializeAsAny[BaseInvocationOutput] = Field(description="The result of the invocation")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls,
|
||||
queue_item: SessionQueueItem,
|
||||
invocation: BaseInvocation,
|
||||
result: BaseInvocationOutput,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> "InvocationCompleteEvent":
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
session_id=queue_item.session_id,
|
||||
invocation_id=invocation.id,
|
||||
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
invocation_type=invocation.get_type(),
|
||||
result=result,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class InvocationErrorEvent(InvocationEventBase):
|
||||
"""Event model for invocation_error"""
|
||||
|
||||
__event_name__ = "invocation_error"
|
||||
|
||||
error_type: str = Field(description="The type of error")
|
||||
error: str = Field(description="The error message")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls,
|
||||
queue_item: SessionQueueItem,
|
||||
invocation: BaseInvocation,
|
||||
error_type: str,
|
||||
error: str,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> "InvocationErrorEvent":
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
session_id=queue_item.session_id,
|
||||
invocation_id=invocation.id,
|
||||
invocation_source_id=queue_item.session.prepared_source_mapping[invocation.id],
|
||||
invocation_type=invocation.get_type(),
|
||||
error_type=error_type,
|
||||
error=error,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class SessionStartedEvent(SessionEventBase):
|
||||
"""Event model for session_started"""
|
||||
|
||||
__event_name__ = "session_started"
|
||||
|
||||
@classmethod
|
||||
def build(cls, queue_item: SessionQueueItem, extra: Optional[ExtraData] = None) -> "SessionStartedEvent":
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
session_id=queue_item.session_id,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class SessionCompleteEvent(SessionEventBase):
|
||||
"""Event model for session_complete"""
|
||||
|
||||
__event_name__ = "session_complete"
|
||||
|
||||
@classmethod
|
||||
def build(cls, queue_item: SessionQueueItem, extra: Optional[ExtraData] = None) -> "SessionCompleteEvent":
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
session_id=queue_item.session_id,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class SessionCanceledEvent(SessionEventBase):
|
||||
"""Event model for session_canceled"""
|
||||
|
||||
__event_name__ = "session_canceled"
|
||||
|
||||
@classmethod
|
||||
def build(cls, queue_item: SessionQueueItem, extra: Optional[ExtraData] = None) -> "SessionCanceledEvent":
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
session_id=queue_item.session_id,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class QueueItemStatusChangedEvent(QueueItemEventBase):
|
||||
"""Event model for queue_item_status_changed"""
|
||||
|
||||
__event_name__ = "queue_item_status_changed"
|
||||
|
||||
status: QUEUE_ITEM_STATUS = Field(description="The new status of the queue item")
|
||||
error: Optional[str] = Field(default=None, description="The error message, if any")
|
||||
created_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was created")
|
||||
updated_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was last updated")
|
||||
started_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was started")
|
||||
completed_at: Optional[str] = Field(default=None, description="The timestamp when the queue item was completed")
|
||||
batch_status: BatchStatus = Field(description="The status of the batch")
|
||||
queue_status: SessionQueueStatus = Field(description="The status of the queue")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls,
|
||||
queue_item: SessionQueueItem,
|
||||
batch_status: BatchStatus,
|
||||
queue_status: SessionQueueStatus,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> "QueueItemStatusChangedEvent":
|
||||
return cls(
|
||||
queue_id=queue_item.queue_id,
|
||||
item_id=queue_item.item_id,
|
||||
batch_id=queue_item.batch_id,
|
||||
status=queue_item.status,
|
||||
error=queue_item.error,
|
||||
created_at=str(queue_item.created_at) if queue_item.created_at else None,
|
||||
updated_at=str(queue_item.updated_at) if queue_item.updated_at else None,
|
||||
started_at=str(queue_item.started_at) if queue_item.started_at else None,
|
||||
completed_at=str(queue_item.completed_at) if queue_item.completed_at else None,
|
||||
batch_status=batch_status,
|
||||
queue_status=queue_status,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class BatchEnqueuedEvent(QueueEventBase):
|
||||
"""Event model for batch_enqueued"""
|
||||
|
||||
__event_name__ = "batch_enqueued"
|
||||
|
||||
batch_id: str = Field(description="The ID of the batch")
|
||||
enqueued: int = Field(description="The number of invocations enqueued")
|
||||
requested: int = Field(
|
||||
description="The number of invocations initially requested to be enqueued (may be less than enqueued if queue was full)"
|
||||
)
|
||||
priority: int = Field(description="The priority of the batch")
|
||||
|
||||
@classmethod
|
||||
def build(cls, enqueue_result: EnqueueBatchResult, extra: Optional[ExtraData] = None) -> "BatchEnqueuedEvent":
|
||||
return cls(
|
||||
queue_id=enqueue_result.queue_id,
|
||||
batch_id=enqueue_result.batch.batch_id,
|
||||
enqueued=enqueue_result.enqueued,
|
||||
requested=enqueue_result.requested,
|
||||
priority=enqueue_result.priority,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class QueueClearedEvent(QueueEventBase):
|
||||
"""Event model for queue_cleared"""
|
||||
|
||||
__event_name__ = "queue_cleared"
|
||||
|
||||
@classmethod
|
||||
def build(cls, queue_id: str, extra: Optional[ExtraData] = None) -> "QueueClearedEvent":
|
||||
return cls(
|
||||
queue_id=queue_id,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class DownloadEventBase(EventBase):
|
||||
"""Base class for events associated with a download"""
|
||||
|
||||
source: str = Field(description="The source of the download")
|
||||
|
||||
|
||||
class DownloadStartedEvent(DownloadEventBase):
|
||||
"""Event model for download_started"""
|
||||
|
||||
__event_name__ = "download_started"
|
||||
|
||||
download_path: str = Field(description="The local path where the download is saved")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "DownloadJob", extra: Optional[ExtraData] = None) -> "DownloadStartedEvent":
|
||||
assert job.download_path
|
||||
return cls(
|
||||
source=str(job.source),
|
||||
download_path=job.download_path.as_posix(),
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class DownloadProgressEvent(DownloadEventBase):
|
||||
"""Event model for download_progress"""
|
||||
|
||||
__event_name__ = "download_progress"
|
||||
|
||||
download_path: str = Field(description="The local path where the download is saved")
|
||||
current_bytes: int = Field(description="The number of bytes downloaded so far")
|
||||
total_bytes: int = Field(description="The total number of bytes to be downloaded")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "DownloadJob", extra: Optional[ExtraData] = None) -> "DownloadProgressEvent":
|
||||
assert job.download_path
|
||||
return cls(
|
||||
source=str(job.source),
|
||||
download_path=job.download_path.as_posix(),
|
||||
current_bytes=job.bytes,
|
||||
total_bytes=job.total_bytes,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class DownloadCompleteEvent(DownloadEventBase):
|
||||
"""Event model for download_complete"""
|
||||
|
||||
__event_name__ = "download_complete"
|
||||
|
||||
download_path: str = Field(description="The local path where the download is saved")
|
||||
total_bytes: int = Field(description="The total number of bytes downloaded")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "DownloadJob", extra: Optional[ExtraData] = None) -> "DownloadCompleteEvent":
|
||||
assert job.download_path
|
||||
return cls(
|
||||
source=str(job.source),
|
||||
download_path=job.download_path.as_posix(),
|
||||
total_bytes=job.total_bytes,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class DownloadCancelledEvent(DownloadEventBase):
|
||||
"""Event model for download_cancelled"""
|
||||
|
||||
__event_name__ = "download_cancelled"
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "DownloadJob", extra: Optional[ExtraData] = None) -> "DownloadCancelledEvent":
|
||||
return cls(
|
||||
source=str(job.source),
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class DownloadErrorEvent(DownloadEventBase):
|
||||
"""Event model for download_error"""
|
||||
|
||||
__event_name__ = "download_error"
|
||||
|
||||
error_type: str = Field(description="The type of error")
|
||||
error: str = Field(description="The error message")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "DownloadJob", extra: Optional[ExtraData] = None) -> "DownloadErrorEvent":
|
||||
assert job.error_type
|
||||
assert job.error
|
||||
return cls(
|
||||
source=str(job.source),
|
||||
error_type=job.error_type,
|
||||
error=job.error,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelEventBase(EventBase):
|
||||
"""Base class for events associated with a model"""
|
||||
|
||||
|
||||
class ModelLoadStartedEvent(ModelEventBase):
|
||||
"""Event model for model_load_started"""
|
||||
|
||||
__event_name__ = "model_load_started"
|
||||
|
||||
config: AnyModelConfig = Field(description="The model's config")
|
||||
submodel_type: Optional[SubModelType] = Field(default=None, description="The submodel type, if any")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls, config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, extra: Optional[ExtraData] = None
|
||||
) -> "ModelLoadStartedEvent":
|
||||
return cls(
|
||||
config=config,
|
||||
submodel_type=submodel_type,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelLoadCompleteEvent(ModelEventBase):
|
||||
"""Event model for model_load_complete"""
|
||||
|
||||
__event_name__ = "model_load_complete"
|
||||
|
||||
config: AnyModelConfig = Field(description="The model's config")
|
||||
submodel_type: Optional[SubModelType] = Field(default=None, description="The submodel type, if any")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls, config: AnyModelConfig, submodel_type: Optional[SubModelType] = None, extra: Optional[ExtraData] = None
|
||||
) -> "ModelLoadCompleteEvent":
|
||||
return cls(
|
||||
config=config,
|
||||
submodel_type=submodel_type,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelInstallDownloadProgressEvent(ModelEventBase):
|
||||
"""Event model for model_install_download_progress"""
|
||||
|
||||
__event_name__ = "model_install_download_progress"
|
||||
|
||||
id: int = Field(description="The ID of the install job")
|
||||
source: str = Field(description="Source of the model; local path, repo_id or url")
|
||||
local_path: str = Field(description="Where model is downloading to")
|
||||
bytes: int = Field(description="Number of bytes downloaded so far")
|
||||
total_bytes: int = Field(description="Total size of download, including all files")
|
||||
parts: list[dict[str, int | str]] = Field(
|
||||
description="Progress of downloading URLs that comprise the model, if any"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> "ModelInstallDownloadProgressEvent":
|
||||
parts: list[dict[str, str | int]] = [
|
||||
{
|
||||
"url": str(x.source),
|
||||
"local_path": str(x.download_path),
|
||||
"bytes": x.bytes,
|
||||
"total_bytes": x.total_bytes,
|
||||
}
|
||||
for x in job.download_parts
|
||||
]
|
||||
return cls(
|
||||
id=job.id,
|
||||
source=str(job.source),
|
||||
local_path=job.local_path.as_posix(),
|
||||
parts=parts,
|
||||
bytes=job.bytes,
|
||||
total_bytes=job.total_bytes,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelInstallDownloadsCompleteEvent(ModelEventBase):
|
||||
"""Emitted once when an install job becomes active."""
|
||||
|
||||
__event_name__ = "model_install_downloads_complete"
|
||||
|
||||
id: int = Field(description="The ID of the install job")
|
||||
source: str = Field(description="Source of the model; local path, repo_id or url")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> "ModelInstallDownloadsCompleteEvent":
|
||||
return cls(
|
||||
id=job.id,
|
||||
source=str(job.source),
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelInstallStartedEvent(ModelEventBase):
|
||||
"""Event model for model_install_started"""
|
||||
|
||||
__event_name__ = "model_install_started"
|
||||
|
||||
id: int = Field(description="The ID of the install job")
|
||||
source: str = Field(description="Source of the model; local path, repo_id or url")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> "ModelInstallStartedEvent":
|
||||
return cls(
|
||||
id=job.id,
|
||||
source=str(job.source),
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelInstallCompleteEvent(ModelEventBase):
|
||||
"""Event model for model_install_complete"""
|
||||
|
||||
__event_name__ = "model_install_complete"
|
||||
|
||||
id: int = Field(description="The ID of the install job")
|
||||
source: str = Field(description="Source of the model; local path, repo_id or url")
|
||||
key: str = Field(description="Model config record key")
|
||||
total_bytes: Optional[int] = Field(description="Size of the model (may be None for installation of a local path)")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> "ModelInstallCompleteEvent":
|
||||
assert job.config_out is not None
|
||||
return cls(
|
||||
id=job.id,
|
||||
source=str(job.source),
|
||||
key=(job.config_out.key),
|
||||
total_bytes=job.total_bytes,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelInstallCancelledEvent(ModelEventBase):
|
||||
"""Event model for model_install_cancelled"""
|
||||
|
||||
__event_name__ = "model_install_cancelled"
|
||||
|
||||
id: int = Field(description="The ID of the install job")
|
||||
source: str = Field(description="Source of the model; local path, repo_id or url")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> "ModelInstallCancelledEvent":
|
||||
return cls(
|
||||
id=job.id,
|
||||
source=str(job.source),
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class ModelInstallErrorEvent(ModelEventBase):
|
||||
"""Event model for model_install_error"""
|
||||
|
||||
__event_name__ = "model_install_error"
|
||||
|
||||
id: int = Field(description="The ID of the install job")
|
||||
source: str = Field(description="Source of the model; local path, repo_id or url")
|
||||
error_type: str = Field(description="The name of the exception")
|
||||
error: str = Field(description="A text description of the exception")
|
||||
|
||||
@classmethod
|
||||
def build(cls, job: "ModelInstallJob", extra: Optional[ExtraData] = None) -> "ModelInstallErrorEvent":
|
||||
assert job.error_type is not None
|
||||
assert job.error is not None
|
||||
return cls(
|
||||
id=job.id,
|
||||
source=str(job.source),
|
||||
error_type=job.error_type,
|
||||
error=job.error,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class BulkDownloadEventBase(EventBase):
|
||||
"""Base class for events associated with a bulk image download"""
|
||||
|
||||
bulk_download_id: str = Field(description="The ID of the bulk image download")
|
||||
bulk_download_item_id: str = Field(description="The ID of the bulk image download item")
|
||||
bulk_download_item_name: str = Field(description="The name of the bulk image download item")
|
||||
|
||||
|
||||
class BulkDownloadStartedEvent(BulkDownloadEventBase):
|
||||
"""Event model for bulk_download_started"""
|
||||
|
||||
__event_name__ = "bulk_download_started"
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls,
|
||||
bulk_download_id: str,
|
||||
bulk_download_item_id: str,
|
||||
bulk_download_item_name: str,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> "BulkDownloadStartedEvent":
|
||||
return cls(
|
||||
bulk_download_id=bulk_download_id,
|
||||
bulk_download_item_id=bulk_download_item_id,
|
||||
bulk_download_item_name=bulk_download_item_name,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class BulkDownloadCompleteEvent(BulkDownloadEventBase):
|
||||
"""Event model for bulk_download_complete"""
|
||||
|
||||
__event_name__ = "bulk_download_complete"
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls,
|
||||
bulk_download_id: str,
|
||||
bulk_download_item_id: str,
|
||||
bulk_download_item_name: str,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> "BulkDownloadCompleteEvent":
|
||||
return cls(
|
||||
bulk_download_id=bulk_download_id,
|
||||
bulk_download_item_id=bulk_download_item_id,
|
||||
bulk_download_item_name=bulk_download_item_name,
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
|
||||
class BulkDownloadErrorEvent(BulkDownloadEventBase):
|
||||
"""Event model for bulk_download_error"""
|
||||
|
||||
__event_name__ = "bulk_download_error"
|
||||
|
||||
error: str = Field(description="The error message")
|
||||
|
||||
@classmethod
|
||||
def build(
|
||||
cls,
|
||||
bulk_download_id: str,
|
||||
bulk_download_item_id: str,
|
||||
bulk_download_item_name: str,
|
||||
error: str,
|
||||
extra: Optional[ExtraData] = None,
|
||||
) -> "BulkDownloadErrorEvent":
|
||||
return cls(
|
||||
bulk_download_id=bulk_download_id,
|
||||
bulk_download_item_id=bulk_download_item_id,
|
||||
bulk_download_item_name=bulk_download_item_name,
|
||||
error=error,
|
||||
extra=extra,
|
||||
)
|
invokeai/app/services/events/events_fastapievents.py (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||
|
||||
import asyncio
|
||||
import threading
|
||||
from queue import Empty, Queue
|
||||
|
||||
from fastapi_events.dispatcher import dispatch
|
||||
|
||||
from invokeai.app.services.events.events_common import (
|
||||
EventBase,
|
||||
)
|
||||
|
||||
from .events_base import EventServiceBase
|
||||
|
||||
|
||||
class FastAPIEventService(EventServiceBase):
|
||||
def __init__(self, event_handler_id: int) -> None:
|
||||
self.event_handler_id = event_handler_id
|
||||
self._queue = Queue[EventBase | None]()
|
||||
self._stop_event = threading.Event()
|
||||
asyncio.create_task(self._dispatch_from_queue(stop_event=self._stop_event))
|
||||
|
||||
super().__init__()
|
||||
|
||||
def stop(self, *args, **kwargs):
|
||||
self._stop_event.set()
|
||||
self._queue.put(None)
|
||||
|
||||
def dispatch(self, event: EventBase) -> None:
|
||||
self._queue.put(event)
|
||||
|
||||
async def _dispatch_from_queue(self, stop_event: threading.Event):
|
||||
"""Get events on from the queue and dispatch them, from the correct thread"""
|
||||
while not stop_event.is_set():
|
||||
try:
|
||||
event = self._queue.get(block=False)
|
||||
if not event: # Probably stopping
|
||||
continue
|
||||
dispatch(event, middleware_id=self.event_handler_id, payload_schema_dump=False)
|
||||
|
||||
except Empty:
|
||||
await asyncio.sleep(0.1)
|
||||
pass
|
||||
|
||||
except asyncio.CancelledError as e:
|
||||
raise e # Raise a proper error
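A minimal usage sketch, assuming an asyncio event loop is already running (required because __init__ calls asyncio.create_task) and that the handler id and queue id are placeholder values:

# illustration only: 1234 and "default" are assumed values
event_service = FastAPIEventService(event_handler_id=1234)
event_service.emit_queue_cleared("default")  # enqueues a QueueClearedEvent for dispatch
event_service.stop()  # signals the background dispatch loop to exit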
|
@@ -4,9 +4,6 @@ from typing import Optional
|
||||
|
||||
from PIL.Image import Image as PILImageType
|
||||
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
|
||||
|
||||
class ImageFileStorageBase(ABC):
|
||||
"""Low-level service responsible for storing and retrieving image files."""
|
||||
@@ -33,8 +30,9 @@ class ImageFileStorageBase(ABC):
|
||||
self,
|
||||
image: PILImageType,
|
||||
image_name: str,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
thumbnail_size: int = 256,
|
||||
) -> None:
|
||||
"""Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image name, thumbnail name, and created timestamp."""
|
||||
@@ -46,6 +44,11 @@ class ImageFileStorageBase(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
|
||||
def get_workflow(self, image_name: str) -> Optional[str]:
|
||||
"""Gets the workflow of an image."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_graph(self, image_name: str) -> Optional[str]:
|
||||
"""Gets the graph of an image."""
|
||||
pass
|
||||
|
@@ -7,9 +7,7 @@ from PIL import Image, PngImagePlugin
|
||||
from PIL.Image import Image as PILImageType
|
||||
from send2trash import send2trash
|
||||
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
|
||||
|
||||
from .image_files_base import ImageFileStorageBase
|
||||
@@ -56,8 +54,9 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
self,
|
||||
image: PILImageType,
|
||||
image_name: str,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
thumbnail_size: int = 256,
|
||||
) -> None:
|
||||
try:
|
||||
@@ -68,13 +67,14 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
info_dict = {}
|
||||
|
||||
if metadata is not None:
|
||||
metadata_json = metadata.model_dump_json()
|
||||
info_dict["invokeai_metadata"] = metadata_json
|
||||
pnginfo.add_text("invokeai_metadata", metadata_json)
|
||||
info_dict["invokeai_metadata"] = metadata
|
||||
pnginfo.add_text("invokeai_metadata", metadata)
|
||||
if workflow is not None:
|
||||
workflow_json = workflow.model_dump_json()
|
||||
info_dict["invokeai_workflow"] = workflow_json
|
||||
pnginfo.add_text("invokeai_workflow", workflow_json)
|
||||
info_dict["invokeai_workflow"] = workflow
|
||||
pnginfo.add_text("invokeai_workflow", workflow)
|
||||
if graph is not None:
|
||||
info_dict["invokeai_graph"] = graph
|
||||
pnginfo.add_text("invokeai_graph", graph)
|
||||
|
||||
# When saving the image, the image object's info field is not populated. We need to set it
|
||||
image.info = info_dict
|
||||
@@ -129,11 +129,18 @@ class DiskImageFileStorage(ImageFileStorageBase):
|
||||
path = path if isinstance(path, Path) else Path(path)
|
||||
return path.exists()
|
||||
|
||||
def get_workflow(self, image_name: str) -> WorkflowWithoutID | None:
|
||||
def get_workflow(self, image_name: str) -> str | None:
|
||||
image = self.get(image_name)
|
||||
workflow = image.info.get("invokeai_workflow", None)
|
||||
if workflow is not None:
|
||||
return WorkflowWithoutID.model_validate_json(workflow)
|
||||
if isinstance(workflow, str):
|
||||
return workflow
|
||||
return None
|
||||
|
||||
def get_graph(self, image_name: str) -> str | None:
|
||||
image = self.get(image_name)
|
||||
graph = image.info.get("invokeai_graph", None)
|
||||
if isinstance(graph, str):
|
||||
return graph
|
||||
return None
|
||||
|
||||
def __validate_storage_folders(self) -> None:
|
||||
|
@@ -80,7 +80,7 @@ class ImageRecordStorageBase(ABC):
|
||||
starred: Optional[bool] = False,
|
||||
session_id: Optional[str] = None,
|
||||
node_id: Optional[str] = None,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
metadata: Optional[str] = None,
|
||||
) -> datetime:
|
||||
"""Saves an image record."""
|
||||
pass
|
||||
|
@@ -328,10 +328,9 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
starred: Optional[bool] = False,
|
||||
session_id: Optional[str] = None,
|
||||
node_id: Optional[str] = None,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
metadata: Optional[str] = None,
|
||||
) -> datetime:
|
||||
try:
|
||||
metadata_json = metadata.model_dump_json() if metadata is not None else None
|
||||
self._lock.acquire()
|
||||
self._cursor.execute(
|
||||
"""--sql
|
||||
@@ -358,7 +357,7 @@ class SqliteImageRecordStorage(ImageRecordStorageBase):
|
||||
height,
|
||||
node_id,
|
||||
session_id,
|
||||
metadata_json,
|
||||
metadata,
|
||||
is_intermediate,
|
||||
starred,
|
||||
has_workflow,
|
||||
|
@@ -12,7 +12,6 @@ from invokeai.app.services.image_records.image_records_common import (
|
||||
)
|
||||
from invokeai.app.services.images.images_common import ImageDTO
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
|
||||
|
||||
class ImageServiceABC(ABC):
|
||||
@@ -51,8 +50,9 @@ class ImageServiceABC(ABC):
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: Optional[bool] = False,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
) -> ImageDTO:
|
||||
"""Creates an image, storing the file and its metadata."""
|
||||
pass
|
||||
@@ -87,7 +87,12 @@ class ImageServiceABC(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
|
||||
def get_workflow(self, image_name: str) -> Optional[str]:
|
||||
"""Gets an image's workflow."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_graph(self, image_name: str) -> Optional[str]:
|
||||
"""Gets an image's workflow."""
|
||||
pass
|
||||
|
||||
|
@@ -5,7 +5,6 @@ from PIL.Image import Image as PILImageType
|
||||
from invokeai.app.invocations.fields import MetadataField
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.shared.pagination import OffsetPaginatedResults
|
||||
from invokeai.app.services.workflow_records.workflow_records_common import WorkflowWithoutID
|
||||
|
||||
from ..image_files.image_files_common import (
|
||||
ImageFileDeleteException,
|
||||
@@ -42,8 +41,9 @@ class ImageService(ImageServiceABC):
|
||||
session_id: Optional[str] = None,
|
||||
board_id: Optional[str] = None,
|
||||
is_intermediate: Optional[bool] = False,
|
||||
metadata: Optional[MetadataField] = None,
|
||||
workflow: Optional[WorkflowWithoutID] = None,
|
||||
metadata: Optional[str] = None,
|
||||
workflow: Optional[str] = None,
|
||||
graph: Optional[str] = None,
|
||||
) -> ImageDTO:
|
||||
if image_origin not in ResourceOrigin:
|
||||
raise InvalidOriginException
|
||||
@@ -64,7 +64,7 @@ class ImageService(ImageServiceABC):
|
||||
image_category=image_category,
|
||||
width=width,
|
||||
height=height,
|
||||
has_workflow=workflow is not None,
|
||||
has_workflow=workflow is not None or graph is not None,
|
||||
# Meta fields
|
||||
is_intermediate=is_intermediate,
|
||||
# Nullable fields
|
||||
@@ -75,7 +75,7 @@ class ImageService(ImageServiceABC):
|
||||
if board_id is not None:
|
||||
self.__invoker.services.board_image_records.add_image_to_board(board_id=board_id, image_name=image_name)
|
||||
self.__invoker.services.image_files.save(
|
||||
image_name=image_name, image=image, metadata=metadata, workflow=workflow
|
||||
image_name=image_name, image=image, metadata=metadata, workflow=workflow, graph=graph
|
||||
)
|
||||
image_dto = self.get_dto(image_name)
|
||||
|
||||
@@ -157,7 +157,7 @@ class ImageService(ImageServiceABC):
|
||||
self.__invoker.services.logger.error("Problem getting image metadata")
|
||||
raise e
|
||||
|
||||
def get_workflow(self, image_name: str) -> Optional[WorkflowWithoutID]:
|
||||
def get_workflow(self, image_name: str) -> Optional[str]:
|
||||
try:
|
||||
return self.__invoker.services.image_files.get_workflow(image_name)
|
||||
except ImageFileNotFoundException:
|
||||
@@ -167,6 +167,16 @@ class ImageService(ImageServiceABC):
|
||||
self.__invoker.services.logger.error("Problem getting image workflow")
|
||||
raise
|
||||
|
||||
def get_graph(self, image_name: str) -> Optional[str]:
|
||||
try:
|
||||
return self.__invoker.services.image_files.get_graph(image_name)
|
||||
except ImageFileNotFoundException:
|
||||
self.__invoker.services.logger.error("Image file not found")
|
||||
raise
|
||||
except Exception:
|
||||
self.__invoker.services.logger.error("Problem getting image graph")
|
||||
raise
|
||||
|
||||
def get_path(self, image_name: str, thumbnail: bool = False) -> str:
|
||||
try:
|
||||
return str(self.__invoker.services.image_files.get_path(image_name, thumbnail))
|
||||
|
@@ -1,11 +1,13 @@
|
||||
"""Initialization file for model install service package."""
|
||||
|
||||
from .model_install_base import (
|
||||
ModelInstallServiceBase,
|
||||
)
|
||||
from .model_install_common import (
|
||||
HFModelSource,
|
||||
InstallStatus,
|
||||
LocalModelSource,
|
||||
ModelInstallJob,
|
||||
ModelInstallServiceBase,
|
||||
ModelSource,
|
||||
UnknownInstallJobException,
|
||||
URLModelSource,
|
||||
|
@@ -1,244 +1,19 @@
|
||||
# Copyright 2023 Lincoln D. Stein and the InvokeAI development team
|
||||
"""Baseclass definitions for the model installer."""
|
||||
|
||||
import re
|
||||
import traceback
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Literal, Optional, Set, Union
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from pydantic import BaseModel, Field, PrivateAttr, field_validator
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase
|
||||
from invokeai.app.services.download import DownloadQueueServiceBase
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.model_install.model_install_common import ModelInstallJob, ModelSource
|
||||
from invokeai.app.services.model_records import ModelRecordServiceBase
|
||||
from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
|
||||
from invokeai.backend.model_manager.config import ModelSourceType
|
||||
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
|
||||
|
||||
|
||||
class InstallStatus(str, Enum):
|
||||
"""State of an install job running in the background."""
|
||||
|
||||
WAITING = "waiting" # waiting to be dequeued
|
||||
DOWNLOADING = "downloading" # downloading of model files in process
|
||||
DOWNLOADS_DONE = "downloads_done" # downloading done, waiting to run
|
||||
RUNNING = "running" # being processed
|
||||
COMPLETED = "completed" # finished running
|
||||
ERROR = "error" # terminated with an error message
|
||||
CANCELLED = "cancelled" # terminated with an error message
|
||||
|
||||
|
||||
class ModelInstallPart(BaseModel):
|
||||
url: AnyHttpUrl
|
||||
path: Path
|
||||
bytes: int = 0
|
||||
total_bytes: int = 0
|
||||
|
||||
|
||||
class UnknownInstallJobException(Exception):
|
||||
"""Raised when the status of an unknown job is requested."""
|
||||
|
||||
|
||||
class StringLikeSource(BaseModel):
|
||||
"""
|
||||
Base class for model sources, implements functions that lets the source be sorted and indexed.
|
||||
|
||||
These shenanigans let this stuff work:
|
||||
|
||||
source1 = LocalModelSource(path='C:/users/mort/foo.safetensors')
|
||||
mydict = {source1: 'model 1'}
|
||||
assert mydict['C:/users/mort/foo.safetensors'] == 'model 1'
|
||||
assert mydict[LocalModelSource(path='C:/users/mort/foo.safetensors')] == 'model 1'
|
||||
|
||||
source2 = LocalModelSource(path=Path('C:/users/mort/foo.safetensors'))
|
||||
assert source1 == source2
|
||||
assert source1 == 'C:/users/mort/foo.safetensors'
|
||||
"""
|
||||
|
||||
def __hash__(self) -> int:
|
||||
"""Return hash of the path field, for indexing."""
|
||||
return hash(str(self))
|
||||
|
||||
def __lt__(self, other: object) -> int:
|
||||
"""Return comparison of the stringified version, for sorting."""
|
||||
return str(self) < str(other)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
"""Return equality on the stringified version."""
|
||||
if isinstance(other, Path):
|
||||
return str(self) == other.as_posix()
|
||||
else:
|
||||
return str(self) == str(other)
|
||||
|
||||
|
||||
class LocalModelSource(StringLikeSource):
|
||||
"""A local file or directory path."""
|
||||
|
||||
path: str | Path
|
||||
inplace: Optional[bool] = False
|
||||
type: Literal["local"] = "local"
|
||||
|
||||
# these methods allow the source to be used in a string-like way,
|
||||
# for example as an index into a dict
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of path when string rep needed."""
|
||||
return Path(self.path).as_posix()
|
||||
|
||||
|
||||
class HFModelSource(StringLikeSource):
|
||||
"""
|
||||
A HuggingFace repo_id with optional variant, sub-folder and access token.
|
||||
Note that the variant option, if not provided to the constructor, will default to fp16, which is
|
||||
what people (almost) always want.
|
||||
"""
|
||||
|
||||
repo_id: str
|
||||
variant: Optional[ModelRepoVariant] = ModelRepoVariant.FP16
|
||||
subfolder: Optional[Path] = None
|
||||
access_token: Optional[str] = None
|
||||
type: Literal["hf"] = "hf"
|
||||
|
||||
@field_validator("repo_id")
|
||||
@classmethod
|
||||
def proper_repo_id(cls, v: str) -> str: # noqa D102
|
||||
if not re.match(r"^([.\w-]+/[.\w-]+)$", v):
|
||||
raise ValueError(f"{v}: invalid repo_id format")
|
||||
return v
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of repoid when string rep needed."""
|
||||
base: str = self.repo_id
|
||||
if self.variant:
|
||||
base += f":{self.variant or ''}"
|
||||
if self.subfolder:
|
||||
base += f":{self.subfolder}"
|
||||
return base
|
||||
|
||||
|
||||
class URLModelSource(StringLikeSource):
|
||||
"""A generic URL point to a checkpoint file."""
|
||||
|
||||
url: AnyHttpUrl
|
||||
access_token: Optional[str] = None
|
||||
type: Literal["url"] = "url"
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of the url when string rep needed."""
|
||||
return str(self.url)
|
||||
|
||||
|
||||
ModelSource = Annotated[Union[LocalModelSource, HFModelSource, URLModelSource], Field(discriminator="type")]
|
||||
|
||||
MODEL_SOURCE_TO_TYPE_MAP = {
|
||||
URLModelSource: ModelSourceType.Url,
|
||||
HFModelSource: ModelSourceType.HFRepoID,
|
||||
LocalModelSource: ModelSourceType.Path,
|
||||
}
|
||||
|
||||
|
||||
class ModelInstallJob(BaseModel):
|
||||
"""Object that tracks the current status of an install request."""
|
||||
|
||||
id: int = Field(description="Unique ID for this job")
|
||||
status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
|
||||
error_reason: Optional[str] = Field(default=None, description="Information about why the job failed")
|
||||
config_in: Dict[str, Any] = Field(
|
||||
default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
|
||||
)
|
||||
config_out: Optional[AnyModelConfig] = Field(
|
||||
default=None, description="After successful installation, this will hold the configuration object."
|
||||
)
|
||||
inplace: bool = Field(
|
||||
default=False, description="Leave model in its current location; otherwise install under models directory"
|
||||
)
|
||||
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
|
||||
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
|
||||
bytes: int = Field(
|
||||
default=0, description="For a remote model, the number of bytes downloaded so far (may not be available)"
|
||||
)
|
||||
total_bytes: int = Field(default=0, description="Total size of the model to be installed")
|
||||
source_metadata: Optional[AnyModelRepoMetadata] = Field(
|
||||
default=None, description="Metadata provided by the model source"
|
||||
)
|
||||
download_parts: Set[DownloadJob] = Field(
|
||||
default_factory=set, description="Download jobs contributing to this install"
|
||||
)
|
||||
error: Optional[str] = Field(
|
||||
default=None, description="On an error condition, this field will contain the text of the exception"
|
||||
)
|
||||
error_traceback: Optional[str] = Field(
|
||||
default=None, description="On an error condition, this field will contain the exception traceback"
|
||||
)
|
||||
# internal flags and transitory settings
|
||||
_install_tmpdir: Optional[Path] = PrivateAttr(default=None)
|
||||
_exception: Optional[Exception] = PrivateAttr(default=None)
|
||||
|
||||
def set_error(self, e: Exception) -> None:
|
||||
"""Record the error and traceback from an exception."""
|
||||
self._exception = e
|
||||
self.error = str(e)
|
||||
self.error_traceback = self._format_error(e)
|
||||
self.status = InstallStatus.ERROR
|
||||
self.error_reason = self._exception.__class__.__name__ if self._exception else None
|
||||
|
||||
def cancel(self) -> None:
|
||||
"""Call to cancel the job."""
|
||||
self.status = InstallStatus.CANCELLED
|
||||
|
||||
@property
|
||||
def error_type(self) -> Optional[str]:
|
||||
"""Class name of the exception that led to status==ERROR."""
|
||||
return self._exception.__class__.__name__ if self._exception else None
|
||||
|
||||
def _format_error(self, exception: Exception) -> str:
|
||||
"""Error traceback."""
|
||||
return "".join(traceback.format_exception(exception))
|
||||
|
||||
@property
|
||||
def cancelled(self) -> bool:
|
||||
"""Set status to CANCELLED."""
|
||||
return self.status == InstallStatus.CANCELLED
|
||||
|
||||
@property
|
||||
def errored(self) -> bool:
|
||||
"""Return true if job has errored."""
|
||||
return self.status == InstallStatus.ERROR
|
||||
|
||||
@property
|
||||
def waiting(self) -> bool:
|
||||
"""Return true if job is waiting to run."""
|
||||
return self.status == InstallStatus.WAITING
|
||||
|
||||
@property
|
||||
def downloading(self) -> bool:
|
||||
"""Return true if job is downloading."""
|
||||
return self.status == InstallStatus.DOWNLOADING
|
||||
|
||||
@property
|
||||
def downloads_done(self) -> bool:
|
||||
"""Return true if job's downloads ae done."""
|
||||
return self.status == InstallStatus.DOWNLOADS_DONE
|
||||
|
||||
@property
|
||||
def running(self) -> bool:
|
||||
"""Return true if job is running."""
|
||||
return self.status == InstallStatus.RUNNING
|
||||
|
||||
@property
|
||||
def complete(self) -> bool:
|
||||
"""Return true if job completed without errors."""
|
||||
return self.status == InstallStatus.COMPLETED
|
||||
|
||||
@property
|
||||
def in_terminal_state(self) -> bool:
|
||||
"""Return true if job is in a terminal state."""
|
||||
return self.status in [InstallStatus.COMPLETED, InstallStatus.ERROR, InstallStatus.CANCELLED]
|
||||
from invokeai.backend.model_manager.config import AnyModelConfig
|
||||
|
||||
|
||||
class ModelInstallServiceBase(ABC):
|
||||
@@ -282,7 +57,7 @@ class ModelInstallServiceBase(ABC):
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def event_bus(self) -> Optional[EventServiceBase]:
|
||||
def event_bus(self) -> Optional["EventServiceBase"]:
|
||||
"""Return the event service base object associated with the installer."""
|
||||
|
||||
@abstractmethod
|
||||
|
invokeai/app/services/model_install/model_install_common.py (new file, 233 lines)
@@ -0,0 +1,233 @@
|
||||
import re
|
||||
import traceback
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Literal, Optional, Set, Union
|
||||
|
||||
from pydantic import BaseModel, Field, PrivateAttr, field_validator
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
from typing_extensions import Annotated
|
||||
|
||||
from invokeai.app.services.download import DownloadJob
|
||||
from invokeai.backend.model_manager import AnyModelConfig, ModelRepoVariant
|
||||
from invokeai.backend.model_manager.config import ModelSourceType
|
||||
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata
|
||||
|
||||
|
||||
class InstallStatus(str, Enum):
|
||||
"""State of an install job running in the background."""
|
||||
|
||||
WAITING = "waiting" # waiting to be dequeued
|
||||
DOWNLOADING = "downloading" # downloading of model files in process
|
||||
DOWNLOADS_DONE = "downloads_done" # downloading done, waiting to run
|
||||
RUNNING = "running" # being processed
|
||||
COMPLETED = "completed" # finished running
|
||||
ERROR = "error" # terminated with an error message
|
||||
CANCELLED = "cancelled" # terminated with an error message
|
||||
|
||||
|
||||
class ModelInstallPart(BaseModel):
|
||||
url: AnyHttpUrl
|
||||
path: Path
|
||||
bytes: int = 0
|
||||
total_bytes: int = 0
|
||||
|
||||
|
||||
class UnknownInstallJobException(Exception):
|
||||
"""Raised when the status of an unknown job is requested."""
|
||||
|
||||
|
||||
class StringLikeSource(BaseModel):
|
||||
"""
|
||||
Base class for model sources; implements functions that let the source be sorted and indexed.
|
||||
|
||||
These shenanigans let this stuff work:
|
||||
|
||||
source1 = LocalModelSource(path='C:/users/mort/foo.safetensors')
|
||||
mydict = {source1: 'model 1'}
|
||||
assert mydict['C:/users/mort/foo.safetensors'] == 'model 1'
|
||||
assert mydict[LocalModelSource(path='C:/users/mort/foo.safetensors')] == 'model 1'
|
||||
|
||||
source2 = LocalModelSource(path=Path('C:/users/mort/foo.safetensors'))
|
||||
assert source1 == source2
|
||||
assert source1 == 'C:/users/mort/foo.safetensors'
|
||||
"""
|
||||
|
||||
def __hash__(self) -> int:
|
||||
"""Return hash of the path field, for indexing."""
|
||||
return hash(str(self))
|
||||
|
||||
def __lt__(self, other: object) -> int:
|
||||
"""Return comparison of the stringified version, for sorting."""
|
||||
return str(self) < str(other)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
"""Return equality on the stringified version."""
|
||||
if isinstance(other, Path):
|
||||
return str(self) == other.as_posix()
|
||||
else:
|
||||
return str(self) == str(other)
|
||||
|
||||
|
||||
class LocalModelSource(StringLikeSource):
|
||||
"""A local file or directory path."""
|
||||
|
||||
path: str | Path
|
||||
inplace: Optional[bool] = False
|
||||
type: Literal["local"] = "local"
|
||||
|
||||
# these methods allow the source to be used in a string-like way,
|
||||
# for example as an index into a dict
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of path when string rep needed."""
|
||||
return Path(self.path).as_posix()
|
||||
|
||||
|
||||
class HFModelSource(StringLikeSource):
|
||||
"""
|
||||
A HuggingFace repo_id with optional variant, sub-folder and access token.
|
||||
Note that the variant option, if not provided to the constructor, will default to fp16, which is
|
||||
what people (almost) always want.
|
||||
"""
|
||||
|
||||
repo_id: str
|
||||
variant: Optional[ModelRepoVariant] = ModelRepoVariant.FP16
|
||||
subfolder: Optional[Path] = None
|
||||
access_token: Optional[str] = None
|
||||
type: Literal["hf"] = "hf"
|
||||
|
||||
@field_validator("repo_id")
|
||||
@classmethod
|
||||
def proper_repo_id(cls, v: str) -> str: # noqa D102
|
||||
if not re.match(r"^([.\w-]+/[.\w-]+)$", v):
|
||||
raise ValueError(f"{v}: invalid repo_id format")
|
||||
return v
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of repoid when string rep needed."""
|
||||
base: str = self.repo_id
|
||||
if self.variant:
|
||||
base += f":{self.variant or ''}"
|
||||
if self.subfolder:
|
||||
base += f":{self.subfolder}"
|
||||
return base
|
||||
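
As a quick illustration of the HFModelSource defined above: the repo_id must look like "owner/name", the variant defaults to fp16, and the string form joins repo_id, variant and subfolder with ":". A minimal sketch; the repo_id and subfolder values are only examples, and the exact rendering of the variant depends on how ModelRepoVariant stringifies:

```python
from invokeai.app.services.model_install.model_install_common import HFModelSource

# Well-formed "owner/name" repo_ids pass the proper_repo_id validator.
source = HFModelSource(repo_id="stabilityai/sdxl-turbo", subfolder="vae")
print(str(source))  # something like "stabilityai/sdxl-turbo:fp16:vae"

# A bare name without the "owner/" prefix is rejected by the field validator.
try:
    HFModelSource(repo_id="sdxl-turbo")
except ValueError as err:  # pydantic's ValidationError is a ValueError subclass
    print(f"rejected: {err}")
```
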
|
||||
|
||||
class URLModelSource(StringLikeSource):
|
||||
"""A generic URL point to a checkpoint file."""
|
||||
|
||||
url: AnyHttpUrl
|
||||
access_token: Optional[str] = None
|
||||
type: Literal["url"] = "url"
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""Return string version of the url when string rep needed."""
|
||||
return str(self.url)
|
||||
|
||||
|
||||
ModelSource = Annotated[Union[LocalModelSource, HFModelSource, URLModelSource], Field(discriminator="type")]
|
||||
|
||||
MODEL_SOURCE_TO_TYPE_MAP = {
|
||||
URLModelSource: ModelSourceType.Url,
|
||||
HFModelSource: ModelSourceType.HFRepoID,
|
||||
LocalModelSource: ModelSourceType.Path,
|
||||
}
|
||||
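
The Annotated union above uses the literal `type` field as a pydantic discriminator, and MODEL_SOURCE_TO_TYPE_MAP maps each concrete source class to the ModelSourceType stored on the model record. A small sketch of how the two fit together; parsing via TypeAdapter is illustrative, not necessarily how the installer itself does it:

```python
from pydantic import TypeAdapter

from invokeai.app.services.model_install.model_install_common import (
    MODEL_SOURCE_TO_TYPE_MAP,
    LocalModelSource,
    ModelSource,
)

# The "type" literal picks the concrete class when parsing a plain dict.
source = TypeAdapter(ModelSource).validate_python(
    {"type": "local", "path": "/tmp/foo.safetensors"}
)
assert isinstance(source, LocalModelSource)

# The map resolves the class to the ModelSourceType recorded for the installed model.
print(MODEL_SOURCE_TO_TYPE_MAP[type(source)])  # ModelSourceType.Path
```
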
|
||||
|
||||
class ModelInstallJob(BaseModel):
|
||||
"""Object that tracks the current status of an install request."""
|
||||
|
||||
id: int = Field(description="Unique ID for this job")
|
||||
status: InstallStatus = Field(default=InstallStatus.WAITING, description="Current status of install process")
|
||||
error_reason: Optional[str] = Field(default=None, description="Information about why the job failed")
|
||||
config_in: Dict[str, Any] = Field(
|
||||
default_factory=dict, description="Configuration information (e.g. 'description') to apply to model."
|
||||
)
|
||||
config_out: Optional[AnyModelConfig] = Field(
|
||||
default=None, description="After successful installation, this will hold the configuration object."
|
||||
)
|
||||
inplace: bool = Field(
|
||||
default=False, description="Leave model in its current location; otherwise install under models directory"
|
||||
)
|
||||
source: ModelSource = Field(description="Source (URL, repo_id, or local path) of model")
|
||||
local_path: Path = Field(description="Path to locally-downloaded model; may be the same as the source")
|
||||
bytes: int = Field(
|
||||
default=0, description="For a remote model, the number of bytes downloaded so far (may not be available)"
|
||||
)
|
||||
total_bytes: int = Field(default=0, description="Total size of the model to be installed")
|
||||
source_metadata: Optional[AnyModelRepoMetadata] = Field(
|
||||
default=None, description="Metadata provided by the model source"
|
||||
)
|
||||
download_parts: Set[DownloadJob] = Field(
|
||||
default_factory=set, description="Download jobs contributing to this install"
|
||||
)
|
||||
error: Optional[str] = Field(
|
||||
default=None, description="On an error condition, this field will contain the text of the exception"
|
||||
)
|
||||
error_traceback: Optional[str] = Field(
|
||||
default=None, description="On an error condition, this field will contain the exception traceback"
|
||||
)
|
||||
# internal flags and transitory settings
|
||||
_install_tmpdir: Optional[Path] = PrivateAttr(default=None)
|
||||
_exception: Optional[Exception] = PrivateAttr(default=None)
|
||||
|
||||
def set_error(self, e: Exception) -> None:
|
||||
"""Record the error and traceback from an exception."""
|
||||
self._exception = e
|
||||
self.error = str(e)
|
||||
self.error_traceback = self._format_error(e)
|
||||
self.status = InstallStatus.ERROR
|
||||
self.error_reason = self._exception.__class__.__name__ if self._exception else None
|
||||
|
||||
def cancel(self) -> None:
|
||||
"""Call to cancel the job."""
|
||||
self.status = InstallStatus.CANCELLED
|
||||
|
||||
@property
|
||||
def error_type(self) -> Optional[str]:
|
||||
"""Class name of the exception that led to status==ERROR."""
|
||||
return self._exception.__class__.__name__ if self._exception else None
|
||||
|
||||
def _format_error(self, exception: Exception) -> str:
|
||||
"""Error traceback."""
|
||||
return "".join(traceback.format_exception(exception))
|
||||
|
||||
@property
|
||||
def cancelled(self) -> bool:
|
||||
"""Set status to CANCELLED."""
|
||||
return self.status == InstallStatus.CANCELLED
|
||||
|
||||
@property
|
||||
def errored(self) -> bool:
|
||||
"""Return true if job has errored."""
|
||||
return self.status == InstallStatus.ERROR
|
||||
|
||||
@property
|
||||
def waiting(self) -> bool:
|
||||
"""Return true if job is waiting to run."""
|
||||
return self.status == InstallStatus.WAITING
|
||||
|
||||
@property
|
||||
def downloading(self) -> bool:
|
||||
"""Return true if job is downloading."""
|
||||
return self.status == InstallStatus.DOWNLOADING
|
||||
|
||||
@property
|
||||
def downloads_done(self) -> bool:
|
||||
"""Return true if job's downloads ae done."""
|
||||
return self.status == InstallStatus.DOWNLOADS_DONE
|
||||
|
||||
@property
|
||||
def running(self) -> bool:
|
||||
"""Return true if job is running."""
|
||||
return self.status == InstallStatus.RUNNING
|
||||
|
||||
@property
|
||||
def complete(self) -> bool:
|
||||
"""Return true if job completed without errors."""
|
||||
return self.status == InstallStatus.COMPLETED
|
||||
|
||||
@property
|
||||
def in_terminal_state(self) -> bool:
|
||||
"""Return true if job is in a terminal state."""
|
||||
return self.status in [InstallStatus.COMPLETED, InstallStatus.ERROR, InstallStatus.CANCELLED]
|
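
The status properties on ModelInstallJob are convenience flags over InstallStatus. A hedged sketch of how a caller is expected to read them; how the `job` object is obtained from the installer is outside this diff:

```python
import time

def wait_for_install(job) -> None:
    """Poll a ModelInstallJob until it reaches a terminal state, then report."""
    while not job.in_terminal_state:
        if job.downloading:
            print(f"{job.source}: {job.bytes}/{job.total_bytes} bytes downloaded")
        time.sleep(1)
    if job.errored:
        # error_reason, error and error_traceback are filled in by set_error()
        print(f"install failed ({job.error_reason}): {job.error}")
    elif job.cancelled:
        print("install cancelled")
    else:
        print(f"installed as: {job.config_out}")
```
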
@@ -3,7 +3,6 @@
|
||||
import locale
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import threading
|
||||
import time
|
||||
from hashlib import sha256
|
||||
@@ -11,8 +10,9 @@ from pathlib import Path
|
||||
from queue import Empty, Queue
|
||||
from shutil import copyfile, copytree, move, rmtree
|
||||
from tempfile import mkdtemp
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
||||
|
||||
import torch
|
||||
import yaml
|
||||
from huggingface_hub import HfFolder
|
||||
from pydantic.networks import AnyHttpUrl
|
||||
@@ -20,8 +20,8 @@ from requests import Session
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.download import DownloadJob, DownloadQueueServiceBase, TqdmProgress
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.model_install.model_install_base import ModelInstallServiceBase
|
||||
from invokeai.app.services.model_records import DuplicateModelException, ModelRecordServiceBase
|
||||
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
|
||||
from invokeai.backend.model_manager.config import (
|
||||
@@ -42,15 +42,15 @@ from invokeai.backend.model_manager.metadata.metadata_base import HuggingFaceMet
|
||||
from invokeai.backend.model_manager.probe import ModelProbe
|
||||
from invokeai.backend.model_manager.search import ModelSearch
|
||||
from invokeai.backend.util import InvokeAILogger
|
||||
from invokeai.backend.util.devices import choose_precision, choose_torch_device
|
||||
from invokeai.backend.util.catch_sigint import catch_sigint
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
|
||||
from .model_install_base import (
|
||||
from .model_install_common import (
|
||||
MODEL_SOURCE_TO_TYPE_MAP,
|
||||
HFModelSource,
|
||||
InstallStatus,
|
||||
LocalModelSource,
|
||||
ModelInstallJob,
|
||||
ModelInstallServiceBase,
|
||||
ModelSource,
|
||||
StringLikeSource,
|
||||
URLModelSource,
|
||||
@@ -58,6 +58,9 @@ from .model_install_base import (
|
||||
|
||||
TMPDIR_PREFIX = "tmpinstall_"
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
|
||||
|
||||
class ModelInstallService(ModelInstallServiceBase):
|
||||
"""class for InvokeAI model installation."""
|
||||
@@ -67,7 +70,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
app_config: InvokeAIAppConfig,
|
||||
record_store: ModelRecordServiceBase,
|
||||
download_queue: DownloadQueueServiceBase,
|
||||
event_bus: Optional[EventServiceBase] = None,
|
||||
event_bus: Optional["EventServiceBase"] = None,
|
||||
session: Optional[Session] = None,
|
||||
):
|
||||
"""
|
||||
@@ -103,7 +106,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
return self._record_store
|
||||
|
||||
@property
|
||||
def event_bus(self) -> Optional[EventServiceBase]: # noqa D102
|
||||
def event_bus(self) -> Optional["EventServiceBase"]: # noqa D102
|
||||
return self._event_bus
|
||||
|
||||
# make the invoker optional here because we don't need it and it
|
||||
@@ -111,17 +114,6 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
def start(self, invoker: Optional[Invoker] = None) -> None:
|
||||
"""Start the installer thread."""
|
||||
|
||||
# Yes, this is weird. When the installer thread is running, the
|
||||
# thread masks the ^C signal. When we receive a
|
||||
# sigINT, we stop the thread, reset sigINT, and send a new
|
||||
# sigINT to the parent process.
|
||||
def sigint_handler(signum, frame):
|
||||
self.stop()
|
||||
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
||||
signal.raise_signal(signal.SIGINT)
|
||||
|
||||
signal.signal(signal.SIGINT, sigint_handler)
|
||||
|
||||
with self._lock:
|
||||
if self._running:
|
||||
raise Exception("Attempt to start the installer service twice")
|
||||
@@ -131,7 +123,8 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
# In normal use, we do not want to scan the models directory - it should never have orphaned models.
|
||||
# We should only do the scan when the flag is set (which should only be set when testing).
|
||||
if self.app_config.scan_models_on_startup:
|
||||
self._register_orphaned_models()
|
||||
with catch_sigint():
|
||||
self._register_orphaned_models()
|
||||
|
||||
# Check all models' paths and confirm they exist. A model could be missing if it was installed on a volume
|
||||
# that isn't currently mounted. In this case, we don't want to delete the model from the database, but we do
|
||||
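
The start-up scan now wraps `_register_orphaned_models()` in the `catch_sigint()` context manager instead of installing a process-wide SIGINT handler for the life of the installer. Its implementation lives in `invokeai.backend.util.catch_sigint` and is not part of this diff; the sketch below is only a guess at the general shape (temporarily swap in a handler, restore the previous one afterwards):

```python
import signal
from contextlib import contextmanager

@contextmanager
def catch_sigint_sketch():
    """Illustrative only: intercept Ctrl-C for the duration of the block, then restore."""
    def _handler(signum, frame):
        print("SIGINT received; finishing the current step before exiting")

    previous = signal.signal(signal.SIGINT, _handler)  # must run on the main thread
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, previous)
```
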
@@ -634,11 +627,10 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._next_job_id += 1
|
||||
return id
|
||||
|
||||
@staticmethod
|
||||
def _guess_variant() -> Optional[ModelRepoVariant]:
|
||||
def _guess_variant(self) -> Optional[ModelRepoVariant]:
|
||||
"""Guess the best HuggingFace variant type to download."""
|
||||
precision = choose_precision(choose_torch_device())
|
||||
return ModelRepoVariant.FP16 if precision == "float16" else None
|
||||
precision = TorchDevice.choose_torch_dtype()
|
||||
return ModelRepoVariant.FP16 if precision == torch.float16 else None
|
||||
|
||||
def _import_local_model(self, source: LocalModelSource, config: Optional[Dict[str, Any]]) -> ModelInstallJob:
|
||||
return ModelInstallJob(
|
||||
@@ -754,6 +746,8 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._download_cache[download_job.source] = install_job # matches a download job to an install job
|
||||
install_job.download_parts.add(download_job)
|
||||
|
||||
# only start the jobs once install_job.download_parts is fully populated
|
||||
for download_job in install_job.download_parts:
|
||||
self._download_queue.submit_download_job(
|
||||
download_job,
|
||||
on_start=self._download_started_callback,
|
||||
@@ -762,6 +756,7 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
on_error=self._download_error_callback,
|
||||
on_cancelled=self._download_cancelled_callback,
|
||||
)
|
||||
|
||||
return install_job
|
||||
|
||||
def _stat_size(self, path: Path) -> int:
|
||||
@@ -862,35 +857,17 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
job.status = InstallStatus.RUNNING
|
||||
self._logger.info(f"Model install started: {job.source}")
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_model_install_running(str(job.source))
|
||||
self._event_bus.emit_model_install_started(job)
|
||||
|
||||
def _signal_job_downloading(self, job: ModelInstallJob) -> None:
|
||||
if self._event_bus:
|
||||
parts: List[Dict[str, str | int]] = [
|
||||
{
|
||||
"url": str(x.source),
|
||||
"local_path": str(x.download_path),
|
||||
"bytes": x.bytes,
|
||||
"total_bytes": x.total_bytes,
|
||||
}
|
||||
for x in job.download_parts
|
||||
]
|
||||
assert job.bytes is not None
|
||||
assert job.total_bytes is not None
|
||||
self._event_bus.emit_model_install_downloading(
|
||||
str(job.source),
|
||||
local_path=job.local_path.as_posix(),
|
||||
parts=parts,
|
||||
bytes=job.bytes,
|
||||
total_bytes=job.total_bytes,
|
||||
id=job.id,
|
||||
)
|
||||
self._event_bus.emit_model_install_download_progress(job)
|
||||
|
||||
def _signal_job_downloads_done(self, job: ModelInstallJob) -> None:
|
||||
job.status = InstallStatus.DOWNLOADS_DONE
|
||||
self._logger.info(f"Model download complete: {job.source}")
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_model_install_downloads_done(str(job.source))
|
||||
self._event_bus.emit_model_install_downloads_complete(job)
|
||||
|
||||
def _signal_job_completed(self, job: ModelInstallJob) -> None:
|
||||
job.status = InstallStatus.COMPLETED
|
||||
@@ -898,24 +875,19 @@ class ModelInstallService(ModelInstallServiceBase):
|
||||
self._logger.info(f"Model install complete: {job.source}")
|
||||
self._logger.debug(f"{job.local_path} registered key {job.config_out.key}")
|
||||
if self._event_bus:
|
||||
assert job.local_path is not None
|
||||
assert job.config_out is not None
|
||||
key = job.config_out.key
|
||||
self._event_bus.emit_model_install_completed(str(job.source), key, id=job.id)
|
||||
self._event_bus.emit_model_install_complete(job)
|
||||
|
||||
def _signal_job_errored(self, job: ModelInstallJob) -> None:
|
||||
self._logger.error(f"Model install error: {job.source}\n{job.error_type}: {job.error}")
|
||||
if self._event_bus:
|
||||
error_type = job.error_type
|
||||
error = job.error
|
||||
assert error_type is not None
|
||||
assert error is not None
|
||||
self._event_bus.emit_model_install_error(str(job.source), error_type, error, id=job.id)
|
||||
assert job.error_type is not None
|
||||
assert job.error is not None
|
||||
self._event_bus.emit_model_install_error(job)
|
||||
|
||||
def _signal_job_cancelled(self, job: ModelInstallJob) -> None:
|
||||
self._logger.info(f"Model install canceled: {job.source}")
|
||||
if self._event_bus:
|
||||
self._event_bus.emit_model_install_cancelled(str(job.source), id=job.id)
|
||||
self._event_bus.emit_model_install_cancelled(job)
|
||||
|
||||
@staticmethod
|
||||
def get_fetcher_from_url(url: str) -> ModelMetadataFetchBase:
|
||||
|
@@ -4,7 +4,6 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional
|
||||
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContextData
|
||||
from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
|
||||
from invokeai.backend.model_manager.load import LoadedModel
|
||||
from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase
|
||||
@@ -15,18 +14,12 @@ class ModelLoadServiceBase(ABC):
|
||||
"""Wrapper around AnyModelLoader."""
|
||||
|
||||
@abstractmethod
|
||||
def load_model(
|
||||
self,
|
||||
model_config: AnyModelConfig,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
|
||||
"""
|
||||
Given a model's configuration, load it and return the LoadedModel object.
|
||||
|
||||
:param model_config: Model configuration record (as returned by ModelRecordBase.get_model())
|
||||
:param submodel: For main (pipeline models), the submodel to fetch.
|
||||
:param context_data: Invocation context data used for event reporting
|
||||
"""
|
||||
|
||||
@property
|
||||
|
@@ -5,7 +5,6 @@ from typing import Optional, Type
|
||||
|
||||
from invokeai.app.services.config import InvokeAIAppConfig
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.shared.invocation_context import InvocationContextData
|
||||
from invokeai.backend.model_manager import AnyModel, AnyModelConfig, SubModelType
|
||||
from invokeai.backend.model_manager.load import (
|
||||
LoadedModel,
|
||||
@@ -51,25 +50,18 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
"""Return the checkpoint convert cache used by this loader."""
|
||||
return self._convert_cache
|
||||
|
||||
def load_model(
|
||||
self,
|
||||
model_config: AnyModelConfig,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
context_data: Optional[InvocationContextData] = None,
|
||||
) -> LoadedModel:
|
||||
def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
|
||||
"""
|
||||
Given a model's configuration, load it and return the LoadedModel object.
|
||||
|
||||
:param model_config: Model configuration record (as returned by ModelRecordBase.get_model())
|
||||
:param submodel: For main (pipeline models), the submodel to fetch.
|
||||
:param context: Invocation context used for event reporting
|
||||
"""
|
||||
if context_data:
|
||||
self._emit_load_event(
|
||||
context_data=context_data,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
)
|
||||
|
||||
# We don't have an invoker during testing
|
||||
# TODO(psyche): Mock this method on the invoker in the tests
|
||||
if hasattr(self, "_invoker"):
|
||||
self._invoker.services.events.emit_model_load_started(model_config, submodel_type)
|
||||
|
||||
implementation, model_config, submodel_type = self._registry.get_implementation(model_config, submodel_type) # type: ignore
|
||||
loaded_model: LoadedModel = implementation(
|
||||
@@ -79,40 +71,7 @@ class ModelLoadService(ModelLoadServiceBase):
|
||||
convert_cache=self._convert_cache,
|
||||
).load_model(model_config, submodel_type)
|
||||
|
||||
if context_data:
|
||||
self._emit_load_event(
|
||||
context_data=context_data,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
loaded=True,
|
||||
)
|
||||
if hasattr(self, "_invoker"):
|
||||
self._invoker.services.events.emit_model_load_started(model_config, submodel_type)
|
||||
|
||||
return loaded_model
|
||||
|
||||
def _emit_load_event(
|
||||
self,
|
||||
context_data: InvocationContextData,
|
||||
model_config: AnyModelConfig,
|
||||
loaded: Optional[bool] = False,
|
||||
submodel_type: Optional[SubModelType] = None,
|
||||
) -> None:
|
||||
if not self._invoker:
|
||||
return
|
||||
|
||||
if not loaded:
|
||||
self._invoker.services.events.emit_model_load_started(
|
||||
queue_id=context_data.queue_item.queue_id,
|
||||
queue_item_id=context_data.queue_item.item_id,
|
||||
queue_batch_id=context_data.queue_item.batch_id,
|
||||
graph_execution_state_id=context_data.queue_item.session_id,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
)
|
||||
else:
|
||||
self._invoker.services.events.emit_model_load_completed(
|
||||
queue_id=context_data.queue_item.queue_id,
|
||||
queue_item_id=context_data.queue_item.item_id,
|
||||
queue_batch_id=context_data.queue_item.batch_id,
|
||||
graph_execution_state_id=context_data.queue_item.session_id,
|
||||
model_config=model_config,
|
||||
submodel_type=submodel_type,
|
||||
)
|
||||
|
@@ -1,12 +1,14 @@
|
||||
# Copyright (c) 2023 Lincoln D. Stein and the InvokeAI Team
|
||||
"""Implementation of ModelManagerServiceBase."""
|
||||
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from typing_extensions import Self
|
||||
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.backend.model_manager.load import ModelCache, ModelConvertCache, ModelLoaderRegistry
|
||||
from invokeai.backend.util.devices import choose_torch_device
|
||||
from invokeai.backend.util.devices import TorchDevice
|
||||
from invokeai.backend.util.logging import InvokeAILogger
|
||||
|
||||
from ..config import InvokeAIAppConfig
|
||||
@@ -67,7 +69,7 @@ class ModelManagerService(ModelManagerServiceBase):
|
||||
model_record_service: ModelRecordServiceBase,
|
||||
download_queue: DownloadQueueServiceBase,
|
||||
events: EventServiceBase,
|
||||
execution_device: torch.device = choose_torch_device(),
|
||||
execution_device: Optional[torch.device] = None,
|
||||
) -> Self:
|
||||
"""
|
||||
Construct the model manager service instance.
|
||||
@@ -82,7 +84,7 @@ class ModelManagerService(ModelManagerServiceBase):
|
||||
max_vram_cache_size=app_config.vram,
|
||||
lazy_offloading=app_config.lazy_offload,
|
||||
logger=logger,
|
||||
execution_device=execution_device,
|
||||
execution_device=execution_device or TorchDevice.choose_torch_device(),
|
||||
)
|
||||
convert_cache = ModelConvertCache(cache_path=app_config.convert_cache_path, max_size=app_config.convert_cache)
|
||||
loader = ModelLoadService(
|
||||
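
The signature change from `execution_device: torch.device = choose_torch_device()` to `execution_device: Optional[torch.device] = None` matters because a default expression runs once, when the function is defined, whereas the new `execution_device or TorchDevice.choose_torch_device()` defers the choice to call time. A small standalone illustration of the difference (the helper name here is made up for the example):

```python
from typing import Optional

import torch

def pick_device() -> torch.device:
    return torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Old pattern: pick_device() is evaluated once, at import/definition time.
def build_old(execution_device: torch.device = pick_device()) -> torch.device:
    return execution_device

# New pattern: the decision happens per call, when the runtime environment is known.
def build_new(execution_device: Optional[torch.device] = None) -> torch.device:
    return execution_device or pick_device()
```
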
|
@@ -1,6 +1,6 @@
|
||||
import shutil
|
||||
import tempfile
|
||||
import typing
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Optional, TypeVar
|
||||
|
||||
@@ -17,12 +17,6 @@ if TYPE_CHECKING:
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
@dataclass
|
||||
class DeleteAllResult:
|
||||
deleted_count: int
|
||||
freed_space_bytes: float
|
||||
|
||||
|
||||
class ObjectSerializerDisk(ObjectSerializerBase[T]):
|
||||
"""Disk-backed storage for arbitrary python objects. Serialization is handled by `torch.save` and `torch.load`.
|
||||
|
||||
@@ -35,6 +29,12 @@ class ObjectSerializerDisk(ObjectSerializerBase[T]):
|
||||
self._ephemeral = ephemeral
|
||||
self._base_output_dir = output_dir
|
||||
self._base_output_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if self._ephemeral:
|
||||
# Remove dangling tempdirs that might have been left over from an earlier unplanned shutdown.
|
||||
for temp_dir in filter(Path.is_dir, self._base_output_dir.glob("tmp*")):
|
||||
shutil.rmtree(temp_dir)
|
||||
|
||||
# Must specify `ignore_cleanup_errors` to avoid fatal errors during cleanup on Windows
|
||||
self._tempdir = (
|
||||
tempfile.TemporaryDirectory(dir=self._base_output_dir, ignore_cleanup_errors=True) if ephemeral else None
|
||||
|
@@ -4,11 +4,16 @@ from threading import BoundedSemaphore, Thread
|
||||
from threading import Event as ThreadEvent
|
||||
from typing import Optional
|
||||
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.typing import Event as FastAPIEvent
|
||||
|
||||
from invokeai.app.invocations.baseinvocation import BaseInvocation
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
from invokeai.app.services.events.events_common import (
|
||||
BatchEnqueuedEvent,
|
||||
FastAPIEvent,
|
||||
QueueClearedEvent,
|
||||
QueueEventBase,
|
||||
QueueItemStatusChangedEvent,
|
||||
SessionCanceledEvent,
|
||||
register_events,
|
||||
)
|
||||
from invokeai.app.services.invocation_stats.invocation_stats_common import GESStatsNotFoundError
|
||||
from invokeai.app.services.session_processor.session_processor_common import CanceledException
|
||||
from invokeai.app.services.session_queue.session_queue_common import SessionQueueItem
|
||||
@@ -31,8 +36,6 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
self._poll_now_event = ThreadEvent()
|
||||
self._cancel_event = ThreadEvent()
|
||||
|
||||
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._on_queue_event)
|
||||
|
||||
self._thread_limit = thread_limit
|
||||
self._thread_semaphore = BoundedSemaphore(thread_limit)
|
||||
self._polling_interval = polling_interval
|
||||
@@ -49,6 +52,8 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
else None
|
||||
)
|
||||
|
||||
register_events({SessionCanceledEvent, QueueClearedEvent, BatchEnqueuedEvent}, self._on_queue_event)
|
||||
|
||||
self._thread = Thread(
|
||||
name="session_processor",
|
||||
target=self._process,
|
||||
@@ -67,30 +72,25 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
def _poll_now(self) -> None:
|
||||
self._poll_now_event.set()
|
||||
|
||||
async def _on_queue_event(self, event: FastAPIEvent) -> None:
|
||||
event_name = event[1]["event"]
|
||||
|
||||
async def _on_queue_event(self, event: FastAPIEvent[QueueEventBase]) -> None:
|
||||
_event_name, payload = event
|
||||
if (
|
||||
event_name == "session_canceled"
|
||||
isinstance(payload, SessionCanceledEvent)
|
||||
and self._queue_item
|
||||
and self._queue_item.item_id == event[1]["data"]["queue_item_id"]
|
||||
and self._queue_item.item_id == payload.item_id
|
||||
):
|
||||
self._cancel_event.set()
|
||||
self._poll_now()
|
||||
elif (
|
||||
event_name == "queue_cleared"
|
||||
isinstance(payload, QueueClearedEvent)
|
||||
and self._queue_item
|
||||
and self._queue_item.queue_id == event[1]["data"]["queue_id"]
|
||||
and self._queue_item.queue_id == payload.queue_id
|
||||
):
|
||||
self._cancel_event.set()
|
||||
self._poll_now()
|
||||
elif event_name == "batch_enqueued":
|
||||
elif isinstance(payload, BatchEnqueuedEvent):
|
||||
self._poll_now()
|
||||
elif event_name == "queue_item_status_changed" and event[1]["data"]["queue_item"]["status"] in [
|
||||
"completed",
|
||||
"failed",
|
||||
"canceled",
|
||||
]:
|
||||
elif isinstance(payload, QueueItemStatusChangedEvent) and payload.status in ("completed", "failed", "canceled"):
|
||||
self._poll_now()
|
||||
|
||||
def resume(self) -> SessionProcessorStatus:
|
||||
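
A condensed sketch of the new event wiring shown above: handlers are registered per event class with `register_events`, and the payload arrives as the second element of the `FastAPIEvent` tuple, so dispatch becomes `isinstance` checks instead of comparing event-name strings. Only the names imported from `events_common` come from the diff; the class and handler bodies here are illustrative:

```python
from invokeai.app.services.events.events_common import (
    BatchEnqueuedEvent,
    FastAPIEvent,
    QueueClearedEvent,
    QueueEventBase,
    SessionCanceledEvent,
    register_events,
)

class QueueWatcher:
    def __init__(self) -> None:
        # Subscribe one coroutine to several typed events at once.
        register_events({SessionCanceledEvent, QueueClearedEvent, BatchEnqueuedEvent}, self._on_queue_event)

    async def _on_queue_event(self, event: FastAPIEvent[QueueEventBase]) -> None:
        _event_name, payload = event
        if isinstance(payload, SessionCanceledEvent):
            print(f"cancel queue item {payload.item_id}")
        elif isinstance(payload, QueueClearedEvent):
            print(f"queue {payload.queue_id} cleared")
        elif isinstance(payload, BatchEnqueuedEvent):
            print("new batch enqueued")
```
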
@@ -139,6 +139,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
poll_now_event.wait(self._polling_interval)
|
||||
continue
|
||||
|
||||
self._invoker.services.events.emit_session_started(self._queue_item)
|
||||
self._invoker.services.logger.debug(f"Executing queue item {self._queue_item.item_id}")
|
||||
cancel_event.clear()
|
||||
|
||||
@@ -153,16 +154,7 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
while self._invocation is not None and not cancel_event.is_set():
|
||||
# get the source node id to provide to clients (the prepared node id is not as useful)
|
||||
source_invocation_id = self._queue_item.session.prepared_source_mapping[self._invocation.id]
|
||||
|
||||
# Send starting event
|
||||
self._invoker.services.events.emit_invocation_started(
|
||||
queue_batch_id=self._queue_item.batch_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session_id,
|
||||
node=self._invocation.model_dump(),
|
||||
source_node_id=source_invocation_id,
|
||||
)
|
||||
self._invoker.services.events.emit_invocation_started(self._queue_item, self._invocation)
|
||||
|
||||
# Innermost processor try block; any unhandled exception is an invocation error & will fail the graph
|
||||
try:
|
||||
@@ -189,19 +181,12 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
# Save outputs and history
|
||||
self._queue_item.session.complete(self._invocation.id, outputs)
|
||||
|
||||
# Send complete event
|
||||
self._invoker.services.events.emit_invocation_complete(
|
||||
queue_batch_id=self._queue_item.batch_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session.id,
|
||||
node=self._invocation.model_dump(),
|
||||
source_node_id=source_invocation_id,
|
||||
result=outputs.model_dump(),
|
||||
self._queue_item, self._invocation, outputs
|
||||
)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
# TODO(MM2): Create an event for this
|
||||
# TODO(MM2): I don't think this is ever raised...
|
||||
pass
|
||||
|
||||
except CanceledException:
|
||||
@@ -227,14 +212,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
)
|
||||
self._invoker.services.logger.error(error)
|
||||
|
||||
# Send error event
|
||||
self._invoker.services.events.emit_invocation_error(
|
||||
queue_batch_id=self._queue_item.session_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session.id,
|
||||
node=self._invocation.model_dump(),
|
||||
source_node_id=source_invocation_id,
|
||||
queue_item=self._queue_item,
|
||||
invocation=self._invocation,
|
||||
error_type=e.__class__.__name__,
|
||||
error=error,
|
||||
)
|
||||
@@ -242,13 +222,10 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
|
||||
# The session is complete if all invocations are complete or there was an error
|
||||
if self._queue_item.session.is_complete() or cancel_event.is_set():
|
||||
# Send complete event
|
||||
self._invoker.services.events.emit_graph_execution_complete(
|
||||
queue_batch_id=self._queue_item.batch_id,
|
||||
queue_item_id=self._queue_item.item_id,
|
||||
queue_id=self._queue_item.queue_id,
|
||||
graph_execution_state_id=self._queue_item.session.id,
|
||||
self._invoker.services.session_queue.set_queue_item_session(
|
||||
self._queue_item.item_id, self._queue_item.session
|
||||
)
|
||||
self._invoker.services.events.emit_session_complete(self._queue_item)
|
||||
# If we are profiling, stop the profiler and dump the profile & stats
|
||||
if self._profiler:
|
||||
profile_path = self._profiler.stop()
|
||||
@@ -279,6 +256,9 @@ class DefaultSessionProcessor(SessionProcessorBase):
|
||||
)
|
||||
# Cancel the queue item
|
||||
if self._queue_item is not None:
|
||||
self._invoker.services.session_queue.set_queue_item_session(
|
||||
self._queue_item.item_id, self._queue_item.session
|
||||
)
|
||||
self._invoker.services.session_queue.cancel_queue_item(
|
||||
self._queue_item.item_id, error=traceback.format_exc()
|
||||
)
|
||||
|
@@ -16,6 +16,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
SessionQueueItemDTO,
|
||||
SessionQueueStatus,
|
||||
)
|
||||
from invokeai.app.services.shared.graph import GraphExecutionState
|
||||
from invokeai.app.services.shared.pagination import CursorPaginatedResults
|
||||
|
||||
|
||||
@@ -103,3 +104,8 @@ class SessionQueueBase(ABC):
|
||||
def get_queue_item(self, item_id: int) -> SessionQueueItem:
|
||||
"""Gets a session queue item by ID"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||
"""Sets the session for a session queue item. Use this to update the session state."""
|
||||
pass
|
||||
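
The new `set_queue_item_session` method exists so the processor can persist the in-memory `GraphExecutionState` back onto the queue item once a session finishes or is cancelled; the processor call sites appear earlier in this diff and the SQLite implementation later. A short sketch of the call pattern, assuming the surrounding invoker and queue item objects are in scope:

```python
def persist_session(invoker, queue_item) -> None:
    # Write the executed GraphExecutionState back to the stored queue item,
    # then announce completion, mirroring the processor changes shown earlier.
    invoker.services.session_queue.set_queue_item_session(queue_item.item_id, queue_item.session)
    invoker.services.events.emit_session_complete(queue_item)
```
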
|
@@ -2,10 +2,13 @@ import sqlite3
|
||||
import threading
|
||||
from typing import Optional, Union, cast
|
||||
|
||||
from fastapi_events.handlers.local import local_handler
|
||||
from fastapi_events.typing import Event as FastAPIEvent
|
||||
|
||||
from invokeai.app.services.events.events_base import EventServiceBase
|
||||
from invokeai.app.services.events.events_common import (
|
||||
FastAPIEvent,
|
||||
InvocationErrorEvent,
|
||||
SessionCanceledEvent,
|
||||
SessionCompleteEvent,
|
||||
register_events,
|
||||
)
|
||||
from invokeai.app.services.invoker import Invoker
|
||||
from invokeai.app.services.session_queue.session_queue_base import SessionQueueBase
|
||||
from invokeai.app.services.session_queue.session_queue_common import (
|
||||
@@ -27,6 +30,7 @@ from invokeai.app.services.session_queue.session_queue_common import (
|
||||
calc_session_count,
|
||||
prepare_values_to_insert,
|
||||
)
|
||||
from invokeai.app.services.shared.graph import GraphExecutionState
|
||||
from invokeai.app.services.shared.pagination import CursorPaginatedResults
|
||||
from invokeai.app.services.shared.sqlite.sqlite_database import SqliteDatabase
|
||||
|
||||
@@ -41,7 +45,11 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
self.__invoker = invoker
|
||||
self._set_in_progress_to_canceled()
|
||||
prune_result = self.prune(DEFAULT_QUEUE_ID)
|
||||
local_handler.register(event_name=EventServiceBase.queue_event, _func=self._on_session_event)
|
||||
|
||||
register_events(events={InvocationErrorEvent}, func=self._handle_error_event)
|
||||
register_events(events={SessionCompleteEvent}, func=self._handle_complete_event)
|
||||
register_events(events={SessionCanceledEvent}, func=self._handle_cancel_event)
|
||||
|
||||
if prune_result.deleted > 0:
|
||||
self.__invoker.services.logger.info(f"Pruned {prune_result.deleted} finished queue items")
|
||||
|
||||
@@ -51,51 +59,35 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
self.__conn = db.conn
|
||||
self.__cursor = self.__conn.cursor()
|
||||
|
||||
def _match_event_name(self, event: FastAPIEvent, match_in: list[str]) -> bool:
|
||||
return event[1]["event"] in match_in
|
||||
|
||||
async def _on_session_event(self, event: FastAPIEvent) -> FastAPIEvent:
|
||||
event_name = event[1]["event"]
|
||||
|
||||
# This was a match statement, but match is not supported on python 3.9
|
||||
if event_name == "graph_execution_state_complete":
|
||||
await self._handle_complete_event(event)
|
||||
elif event_name == "invocation_error":
|
||||
await self._handle_error_event(event)
|
||||
elif event_name == "session_canceled":
|
||||
await self._handle_cancel_event(event)
|
||||
return event
|
||||
|
||||
async def _handle_complete_event(self, event: FastAPIEvent) -> None:
|
||||
async def _handle_complete_event(self, event: FastAPIEvent[SessionCompleteEvent]) -> None:
|
||||
try:
|
||||
item_id = event[1]["data"]["queue_item_id"]
|
||||
# When a queue item has an error, we get an error event, then a completed event.
|
||||
# Mark the queue item completed only if it isn't already marked completed, e.g.
|
||||
# by a previously-handled error event.
|
||||
queue_item = self.get_queue_item(item_id)
|
||||
if queue_item.status not in ["completed", "failed", "canceled"]:
|
||||
queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="completed")
|
||||
except SessionQueueItemNotFoundError:
|
||||
return
|
||||
_event_name, payload = event
|
||||
|
||||
async def _handle_error_event(self, event: FastAPIEvent) -> None:
|
||||
queue_item = self.get_queue_item(payload.item_id)
|
||||
if queue_item.status not in ["completed", "failed", "canceled"]:
|
||||
self._set_queue_item_status(item_id=payload.item_id, status="completed")
|
||||
except SessionQueueItemNotFoundError:
|
||||
pass
|
||||
|
||||
async def _handle_error_event(self, event: FastAPIEvent[InvocationErrorEvent]) -> None:
|
||||
try:
|
||||
item_id = event[1]["data"]["queue_item_id"]
|
||||
error = event[1]["data"]["error"]
|
||||
queue_item = self.get_queue_item(item_id)
|
||||
_event_name, payload = event
|
||||
# always set to failed if we have an error, even if the item was previously marked completed or canceled
|
||||
queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="failed", error=error)
|
||||
self._set_queue_item_status(item_id=payload.item_id, status="failed", error=payload.error)
|
||||
except SessionQueueItemNotFoundError:
|
||||
return
|
||||
pass
|
||||
|
||||
async def _handle_cancel_event(self, event: FastAPIEvent) -> None:
|
||||
async def _handle_cancel_event(self, event: FastAPIEvent[SessionCanceledEvent]) -> None:
|
||||
try:
|
||||
item_id = event[1]["data"]["queue_item_id"]
|
||||
queue_item = self.get_queue_item(item_id)
|
||||
_event_name, payload = event
|
||||
queue_item = self.get_queue_item(payload.item_id)
|
||||
if queue_item.status not in ["completed", "failed", "canceled"]:
|
||||
queue_item = self._set_queue_item_status(item_id=queue_item.item_id, status="canceled")
|
||||
self._set_queue_item_status(item_id=payload.item_id, status="canceled")
|
||||
except SessionQueueItemNotFoundError:
|
||||
return
|
||||
pass
|
||||
|
||||
def _set_in_progress_to_canceled(self) -> None:
|
||||
"""
|
||||
@@ -292,11 +284,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
queue_item = self.get_queue_item(item_id)
|
||||
batch_status = self.get_batch_status(queue_id=queue_item.queue_id, batch_id=queue_item.batch_id)
|
||||
queue_status = self.get_queue_status(queue_id=queue_item.queue_id)
|
||||
self.__invoker.services.events.emit_queue_item_status_changed(
|
||||
session_queue_item=queue_item,
|
||||
batch_status=batch_status,
|
||||
queue_status=queue_status,
|
||||
)
|
||||
self.__invoker.services.events.emit_queue_item_status_changed(queue_item, batch_status, queue_status)
|
||||
return queue_item
|
||||
|
||||
def is_empty(self, queue_id: str) -> IsEmptyResult:
|
||||
@@ -429,12 +417,7 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
if queue_item.status not in ["canceled", "failed", "completed"]:
|
||||
status = "failed" if error is not None else "canceled"
|
||||
queue_item = self._set_queue_item_status(item_id=item_id, status=status, error=error) # type: ignore [arg-type] # mypy seems to not narrow the Literals here
|
||||
self.__invoker.services.events.emit_session_canceled(
|
||||
queue_item_id=queue_item.item_id,
|
||||
queue_id=queue_item.queue_id,
|
||||
queue_batch_id=queue_item.batch_id,
|
||||
graph_execution_state_id=queue_item.session_id,
|
||||
)
|
||||
self.__invoker.services.events.emit_session_canceled(queue_item)
|
||||
return queue_item
|
||||
|
||||
def cancel_by_batch_ids(self, queue_id: str, batch_ids: list[str]) -> CancelByBatchIDsResult:
|
||||
@@ -470,18 +453,11 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
self.__conn.commit()
|
||||
if current_queue_item is not None and current_queue_item.batch_id in batch_ids:
|
||||
self.__invoker.services.events.emit_session_canceled(
|
||||
queue_item_id=current_queue_item.item_id,
|
||||
queue_id=current_queue_item.queue_id,
|
||||
queue_batch_id=current_queue_item.batch_id,
|
||||
graph_execution_state_id=current_queue_item.session_id,
|
||||
)
|
||||
self.__invoker.services.events.emit_session_canceled(current_queue_item)
|
||||
batch_status = self.get_batch_status(queue_id=queue_id, batch_id=current_queue_item.batch_id)
|
||||
queue_status = self.get_queue_status(queue_id=queue_id)
|
||||
self.__invoker.services.events.emit_queue_item_status_changed(
|
||||
session_queue_item=current_queue_item,
|
||||
batch_status=batch_status,
|
||||
queue_status=queue_status,
|
||||
current_queue_item, batch_status, queue_status
|
||||
)
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
@@ -521,18 +497,11 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
)
|
||||
self.__conn.commit()
|
||||
if current_queue_item is not None and current_queue_item.queue_id == queue_id:
|
||||
self.__invoker.services.events.emit_session_canceled(
|
||||
queue_item_id=current_queue_item.item_id,
|
||||
queue_id=current_queue_item.queue_id,
|
||||
queue_batch_id=current_queue_item.batch_id,
|
||||
graph_execution_state_id=current_queue_item.session_id,
|
||||
)
|
||||
self.__invoker.services.events.emit_session_canceled(current_queue_item)
|
||||
batch_status = self.get_batch_status(queue_id=queue_id, batch_id=current_queue_item.batch_id)
|
||||
queue_status = self.get_queue_status(queue_id=queue_id)
|
||||
self.__invoker.services.events.emit_queue_item_status_changed(
|
||||
session_queue_item=current_queue_item,
|
||||
batch_status=batch_status,
|
||||
queue_status=queue_status,
|
||||
current_queue_item, batch_status, queue_status
|
||||
)
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
@@ -562,6 +531,29 @@ class SqliteSessionQueue(SessionQueueBase):
|
||||
raise SessionQueueItemNotFoundError(f"No queue item with id {item_id}")
|
||||
return SessionQueueItem.queue_item_from_dict(dict(result))
|
||||
|
||||
def set_queue_item_session(self, item_id: int, session: GraphExecutionState) -> SessionQueueItem:
|
||||
try:
|
||||
# Use exclude_none so we don't end up with a bunch of nulls in the graph - this can cause validation errors
|
||||
# when the graph is loaded. Graph execution occurs purely in memory - the session saved here is not referenced
|
||||
# during execution.
|
||||
session_json = session.model_dump_json(warnings=False, exclude_none=True)
|
||||
self.__lock.acquire()
|
||||
self.__cursor.execute(
|
||||
"""--sql
|
||||
UPDATE session_queue
|
||||
SET session = ?
|
||||
WHERE item_id = ?
|
||||
""",
|
||||
(session_json, item_id),
|
||||
)
|
||||
self.__conn.commit()
|
||||
except Exception:
|
||||
self.__conn.rollback()
|
||||
raise
|
||||
finally:
|
||||
self.__lock.release()
|
||||
return self.get_queue_item(item_id)
|
||||
|
||||
def list_queue_items(
|
||||
self,
|
||||
queue_id: str,
|
||||
|
@@ -180,9 +180,9 @@ class ImagesInterface(InvocationContextInterface):
|
||||
# If `metadata` is provided directly, use that. Else, use the metadata provided by `WithMetadata`, falling back to None.
|
||||
metadata_ = None
|
||||
if metadata:
|
||||
metadata_ = metadata
|
||||
elif isinstance(self._data.invocation, WithMetadata):
|
||||
metadata_ = self._data.invocation.metadata
|
||||
metadata_ = metadata.model_dump_json()
|
||||
elif isinstance(self._data.invocation, WithMetadata) and self._data.invocation.metadata:
|
||||
metadata_ = self._data.invocation.metadata.model_dump_json()
|
||||
|
||||
# If `board_id` is provided directly, use that. Else, use the board provided by `WithBoard`, falling back to None.
|
||||
board_id_ = None
|
||||
@@ -191,6 +191,14 @@ class ImagesInterface(InvocationContextInterface):
|
||||
elif isinstance(self._data.invocation, WithBoard) and self._data.invocation.board:
|
||||
board_id_ = self._data.invocation.board.board_id
|
||||
|
||||
workflow_ = None
|
||||
if self._data.queue_item.workflow:
|
||||
workflow_ = self._data.queue_item.workflow.model_dump_json()
|
||||
|
||||
graph_ = None
|
||||
if self._data.queue_item.session.graph:
|
||||
graph_ = self._data.queue_item.session.graph.model_dump_json()
|
||||
|
||||
return self._services.images.create(
|
||||
image=image,
|
||||
is_intermediate=self._data.invocation.is_intermediate,
|
||||
@@ -198,7 +206,8 @@ class ImagesInterface(InvocationContextInterface):
|
||||
board_id=board_id_,
|
||||
metadata=metadata_,
|
||||
image_origin=ResourceOrigin.INTERNAL,
|
||||
workflow=self._data.queue_item.workflow,
|
||||
workflow=workflow_,
|
||||
graph=graph_,
|
||||
session_id=self._data.queue_item.session_id,
|
||||
node_id=self._data.invocation.id,
|
||||
)
|
||||
@@ -344,11 +353,11 @@ class ModelsInterface(InvocationContextInterface):
|
||||
|
||||
if isinstance(identifier, str):
|
||||
model = self._services.model_manager.store.get_model(identifier)
|
||||
return self._services.model_manager.load.load_model(model, submodel_type, self._data)
|
||||
return self._services.model_manager.load.load_model(model, submodel_type)
|
||||
else:
|
||||
_submodel_type = submodel_type or identifier.submodel_type
|
||||
model = self._services.model_manager.store.get_model(identifier.key)
|
||||
return self._services.model_manager.load.load_model(model, _submodel_type, self._data)
|
||||
return self._services.model_manager.load.load_model(model, _submodel_type)
|
||||
|
||||
def load_by_attrs(
|
||||
self, name: str, base: BaseModelType, type: ModelType, submodel_type: Optional[SubModelType] = None
|
||||
@@ -373,7 +382,7 @@ class ModelsInterface(InvocationContextInterface):
|
||||
if len(configs) > 1:
|
||||
raise ValueError(f"More than one model found with name {name}, base {base}, and type {type}")
|
||||
|
||||
return self._services.model_manager.load.load_model(configs[0], submodel_type, self._data)
|
||||
return self._services.model_manager.load.load_model(configs[0], submodel_type)
|
||||
|
||||
def get_config(self, identifier: Union[str, "ModelIdentifierField"]) -> AnyModelConfig:
|
||||
"""Gets a model's config.
|
||||
|
@@ -1,13 +1,21 @@
|
||||
from typing import Union
|
||||
from typing import Any, Literal, Union
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
from controlnet_aux.util import HWC3
|
||||
from diffusers.utils import PIL_INTERPOLATION
|
||||
from einops import rearrange
|
||||
from PIL import Image
|
||||
|
||||
from invokeai.backend.image_util.util import nms, normalize_image_channel_count
|
||||
|
||||
CONTROLNET_RESIZE_VALUES = Literal[
|
||||
"just_resize",
|
||||
"crop_resize",
|
||||
"fill_resize",
|
||||
"just_resize_simple",
|
||||
]
|
||||
CONTROLNET_MODE_VALUES = Literal["balanced", "more_prompt", "more_control", "unbalanced"]
|
||||
|
||||
###################################################################
|
||||
# Copy of scripts/lvminthin.py from Mikubill/sd-webui-controlnet
|
||||
###################################################################
|
||||
@@ -68,17 +76,6 @@ def lvmin_thin(x, prunings=True):
|
||||
return y
|
||||
|
||||
|
||||
def nake_nms(x):
|
||||
f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
|
||||
f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
|
||||
f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
|
||||
f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
|
||||
y = np.zeros_like(x)
|
||||
for f in [f1, f2, f3, f4]:
|
||||
np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
|
||||
return y
|
||||
|
||||
|
||||
################################################################################
|
||||
# copied from Mikubill/sd-webui-controlnet external_code.py and modified for InvokeAI
|
||||
################################################################################
|
||||
@ -134,98 +131,122 @@ def pixel_perfect_resolution(
|
||||
return int(np.round(estimation))
|
||||
|
||||
|
||||
def clone_contiguous(x: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
|
||||
"""Get a memory-contiguous clone of the given numpy array, as a safety measure and to improve computation efficiency."""
|
||||
return np.ascontiguousarray(x).copy()
|
||||
|
||||
|
||||
def np_img_to_torch(np_img: np.ndarray[Any, Any], device: torch.device) -> torch.Tensor:
|
||||
"""Convert a numpy image to a PyTorch tensor. The image is normalized to 0-1, rearranged to BCHW format and sent to
|
||||
the specified device."""
|
||||
|
||||
torch_img = torch.from_numpy(np_img)
|
||||
normalized = torch_img.float() / 255.0
|
||||
bchw = rearrange(normalized, "h w c -> 1 c h w")
|
||||
on_device = bchw.to(device)
|
||||
return on_device.clone()
|
||||
|
||||
|
||||
def heuristic_resize(np_img: np.ndarray[Any, Any], size: tuple[int, int]) -> np.ndarray[Any, Any]:
|
||||
"""Resizes an image using a heuristic to choose the best resizing strategy.
|
||||
|
||||
- If the image appears to be an edge map, special handling will be applied to ensure the edges are not distorted.
|
||||
- Single-pixel edge maps use NMS and thinning to keep the edges as single-pixel lines.
|
||||
- Low-color-count images are resized with nearest-neighbor to preserve color information (for e.g. segmentation maps).
|
||||
- The alpha channel is handled separately to ensure it is resized correctly.
|
||||
|
||||
Args:
|
||||
np_img (np.ndarray): The input image.
|
||||
size (tuple[int, int]): The target size for the image.
|
||||
|
||||
Returns:
|
||||
np.ndarray: The resized image.
|
||||
|
||||
Adapted from https://github.com/Mikubill/sd-webui-controlnet.
|
||||
"""
|
||||
|
||||
# Return early if the image is already at the requested size
|
||||
if np_img.shape[0] == size[1] and np_img.shape[1] == size[0]:
|
||||
return np_img
|
||||
|
||||
# If the image has an alpha channel, separate it for special handling later.
|
||||
inpaint_mask = None
|
||||
if np_img.ndim == 3 and np_img.shape[2] == 4:
|
||||
inpaint_mask = np_img[:, :, 3]
|
||||
np_img = np_img[:, :, 0:3]
|
||||
|
||||
new_size_is_smaller = (size[0] * size[1]) < (np_img.shape[0] * np_img.shape[1])
|
||||
new_size_is_bigger = (size[0] * size[1]) > (np_img.shape[0] * np_img.shape[1])
|
||||
unique_color_count = np.unique(np_img.reshape(-1, np_img.shape[2]), axis=0).shape[0]
|
||||
is_one_pixel_edge = False
|
||||
is_binary = False
|
||||
|
||||
if unique_color_count == 2:
|
||||
# If the image has only two colors, it is likely binary. Check if the image has one-pixel edges.
|
||||
is_binary = np.min(np_img) < 16 and np.max(np_img) > 240
|
||||
if is_binary:
|
||||
eroded = cv2.erode(np_img, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
dilated = cv2.dilate(eroded, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
one_pixel_edge_count = np.where(dilated < np_img)[0].shape[0]
|
||||
all_edge_count = np.where(np_img > 127)[0].shape[0]
|
||||
is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count
|
||||
|
||||
if 2 < unique_color_count < 200:
|
||||
# With a low color count, we assume this is a map where exact colors are important. Near-neighbor preserves
|
||||
# the colors as needed.
|
||||
interpolation = cv2.INTER_NEAREST
|
||||
elif new_size_is_smaller:
|
||||
# This works best for downscaling
|
||||
interpolation = cv2.INTER_AREA
|
||||
else:
|
||||
# Fall back for other cases
|
||||
interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. NEVER CHANGE THIS
|
||||
|
||||
# This may be further transformed depending on the binary nature of the image.
|
||||
resized = cv2.resize(np_img, size, interpolation=interpolation)
|
||||
|
||||
if inpaint_mask is not None:
|
||||
# Resize the inpaint mask to match the resized image using the same interpolation method.
|
||||
inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation)
|
||||
|
||||
# If the image is binary, we will perform some additional processing to ensure the edges are preserved.
|
||||
if is_binary:
|
||||
resized = np.mean(resized.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8)
|
||||
if is_one_pixel_edge:
|
||||
# Use NMS and thinning to keep the edges as single-pixel lines.
|
||||
resized = nms(resized)
|
||||
_, resized = cv2.threshold(resized, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
resized = lvmin_thin(resized, prunings=new_size_is_bigger)
|
||||
else:
|
||||
_, resized = cv2.threshold(resized, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
resized = np.stack([resized] * 3, axis=2)
|
||||
|
||||
# Restore the alpha channel if it was present.
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0
|
||||
inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8)
|
||||
resized = np.concatenate([resized, inpaint_mask], axis=2)
|
||||
|
||||
return resized
|
||||
|
||||
|
||||
###########################################################################
|
||||
# Copied from detectmap_proc method in scripts/detectmap_proc.py in Mikubill/sd-webui-controlnet
|
||||
# modified for InvokeAI
|
||||
###########################################################################
|
||||
# def detectmap_proc(detected_map, module, resize_mode, h, w):
|
||||
def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device: torch.device = torch.device("cpu")):
|
||||
# if 'inpaint' in module:
|
||||
# np_img = np_img.astype(np.float32)
|
||||
# else:
|
||||
# np_img = HWC3(np_img)
|
||||
np_img = HWC3(np_img)
|
||||
def np_img_resize(
|
||||
np_img: np.ndarray,
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES,
|
||||
h: int,
|
||||
w: int,
|
||||
device: torch.device = torch.device("cpu"),
|
||||
) -> tuple[torch.Tensor, np.ndarray[Any, Any]]:
|
||||
np_img = normalize_image_channel_count(np_img)
|
||||
|
||||
def safe_numpy(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = y.copy()
|
||||
y = np.ascontiguousarray(y)
|
||||
y = y.copy()
|
||||
return y
|
||||
|
||||
def get_pytorch_control(x):
|
||||
# A very safe method to make sure that Apple/Mac works
|
||||
y = x
|
||||
|
||||
# below is very boring but do not change these. If you change these Apple or Mac may fail.
|
||||
y = torch.from_numpy(y)
|
||||
y = y.float() / 255.0
|
||||
y = rearrange(y, "h w c -> 1 c h w")
|
||||
y = y.clone()
|
||||
# y = y.to(devices.get_device_for("controlnet"))
|
||||
y = y.to(device)
|
||||
y = y.clone()
|
||||
return y
|
||||
|
||||
def high_quality_resize(x: np.ndarray, size):
|
||||
# Written by lvmin
|
||||
# Super high-quality control map up-scaling, considering binary, seg, and one-pixel edges
|
||||
inpaint_mask = None
|
||||
if x.ndim == 3 and x.shape[2] == 4:
|
||||
inpaint_mask = x[:, :, 3]
|
||||
x = x[:, :, 0:3]
|
||||
|
||||
new_size_is_smaller = (size[0] * size[1]) < (x.shape[0] * x.shape[1])
|
||||
new_size_is_bigger = (size[0] * size[1]) > (x.shape[0] * x.shape[1])
|
||||
unique_color_count = np.unique(x.reshape(-1, x.shape[2]), axis=0).shape[0]
|
||||
is_one_pixel_edge = False
|
||||
is_binary = False
|
||||
if unique_color_count == 2:
|
||||
is_binary = np.min(x) < 16 and np.max(x) > 240
|
||||
if is_binary:
|
||||
xc = x
|
||||
xc = cv2.erode(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
xc = cv2.dilate(xc, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
|
||||
one_pixel_edge_count = np.where(xc < x)[0].shape[0]
|
||||
all_edge_count = np.where(x > 127)[0].shape[0]
|
||||
is_one_pixel_edge = one_pixel_edge_count * 2 > all_edge_count
|
||||
|
||||
if 2 < unique_color_count < 200:
|
||||
interpolation = cv2.INTER_NEAREST
|
||||
elif new_size_is_smaller:
|
||||
interpolation = cv2.INTER_AREA
|
||||
else:
|
||||
interpolation = cv2.INTER_CUBIC # Must be CUBIC because we now use nms. NEVER CHANGE THIS
|
||||
|
||||
y = cv2.resize(x, size, interpolation=interpolation)
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = cv2.resize(inpaint_mask, size, interpolation=interpolation)
|
||||
|
||||
if is_binary:
|
||||
y = np.mean(y.astype(np.float32), axis=2).clip(0, 255).astype(np.uint8)
|
||||
if is_one_pixel_edge:
|
||||
y = nake_nms(y)
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = lvmin_thin(y, prunings=new_size_is_bigger)
|
||||
else:
|
||||
_, y = cv2.threshold(y, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
||||
y = np.stack([y] * 3, axis=2)
|
||||
|
||||
if inpaint_mask is not None:
|
||||
inpaint_mask = (inpaint_mask > 127).astype(np.float32) * 255.0
|
||||
inpaint_mask = inpaint_mask[:, :, None].clip(0, 255).astype(np.uint8)
|
||||
y = np.concatenate([y, inpaint_mask], axis=2)
|
||||
|
||||
return y
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.RESIZE:
|
||||
if resize_mode == "just_resize": # RESIZE
|
||||
np_img = high_quality_resize(np_img, (w, h))
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
np_img = heuristic_resize(np_img, (w, h))
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
|
||||
old_h, old_w, _ = np_img.shape
|
||||
old_w = float(old_w)
|
||||
@ -236,7 +257,6 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device:
|
||||
def safeint(x: Union[int, float]) -> int:
|
||||
return int(np.round(x))
|
||||
|
||||
# if resize_mode == external_code.ResizeMode.OUTER_FIT:
|
||||
if resize_mode == "fill_resize": # OUTER_FIT
|
||||
k = min(k0, k1)
|
||||
borders = np.concatenate([np_img[0, :, :], np_img[-1, :, :], np_img[:, 0, :], np_img[:, -1, :]], axis=0)
|
||||
@ -245,23 +265,23 @@ def np_img_resize(np_img: np.ndarray, resize_mode: str, h: int, w: int, device:
|
||||
# Inpaint hijack
|
||||
high_quality_border_color[3] = 255
|
||||
high_quality_background = np.tile(high_quality_border_color[None, None], [h, w, 1])
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (h - new_h) // 2)
|
||||
pad_w = max(0, (w - new_w) // 2)
|
||||
high_quality_background[pad_h : pad_h + new_h, pad_w : pad_w + new_w] = np_img
|
||||
np_img = high_quality_background
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
else: # resize_mode == "crop_resize" (INNER_FIT)
|
||||
k = max(k0, k1)
|
||||
np_img = high_quality_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
np_img = heuristic_resize(np_img, (safeint(old_w * k), safeint(old_h * k)))
|
||||
new_h, new_w, _ = np_img.shape
|
||||
pad_h = max(0, (new_h - h) // 2)
|
||||
pad_w = max(0, (new_w - w) // 2)
|
||||
np_img = np_img[pad_h : pad_h + h, pad_w : pad_w + w]
|
||||
np_img = safe_numpy(np_img)
|
||||
return get_pytorch_control(np_img), np_img
|
||||
np_img = clone_contiguous(np_img)
|
||||
return np_img_to_torch(np_img, device), np_img
|
||||
|
||||
|
||||
def prepare_control_image(
|
||||
@ -269,12 +289,12 @@ def prepare_control_image(
|
||||
width: int,
|
||||
height: int,
|
||||
num_channels: int = 3,
|
||||
device="cuda",
|
||||
dtype=torch.float16,
|
||||
do_classifier_free_guidance=True,
|
||||
control_mode="balanced",
|
||||
resize_mode="just_resize_simple",
|
||||
):
|
||||
device: str = "cuda",
|
||||
dtype: torch.dtype = torch.float16,
|
||||
control_mode: CONTROLNET_MODE_VALUES = "balanced",
|
||||
resize_mode: CONTROLNET_RESIZE_VALUES = "just_resize_simple",
|
||||
do_classifier_free_guidance: bool = True,
|
||||
) -> torch.Tensor:
|
||||
"""Pre-process images for ControlNets or T2I-Adapters.
|
||||
|
||||
Args:
|
||||
@ -292,26 +312,15 @@ def prepare_control_image(
|
||||
resize_mode (str, optional): Defaults to "just_resize_simple".
|
||||
|
||||
Raises:
|
||||
NotImplementedError: If resize_mode == "crop_resize_simple".
|
||||
NotImplementedError: If resize_mode == "fill_resize_simple".
|
||||
ValueError: If `resize_mode` is not recognized.
|
||||
ValueError: If `num_channels` is out of range.
|
||||
|
||||
Returns:
|
||||
torch.Tensor: The pre-processed input tensor.
|
||||
"""
|
||||
if (
|
||||
resize_mode == "just_resize_simple"
|
||||
or resize_mode == "crop_resize_simple"
|
||||
or resize_mode == "fill_resize_simple"
|
||||
):
|
||||
if resize_mode == "just_resize_simple":
|
||||
image = image.convert("RGB")
|
||||
if resize_mode == "just_resize_simple":
|
||||
image = image.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])
|
||||
elif resize_mode == "crop_resize_simple":
|
||||
raise NotImplementedError(f"prepare_control_image is not implemented for resize_mode='{resize_mode}'.")
|
||||
elif resize_mode == "fill_resize_simple":
|
||||
raise NotImplementedError(f"prepare_control_image is not implemented for resize_mode='{resize_mode}'.")
|
||||
image = image.resize((width, height), resample=Image.LANCZOS)
|
||||
nimage = np.array(image)
|
||||
nimage = nimage[None, :]
|
||||
nimage = np.concatenate([nimage], axis=0)
|
||||
@ -328,8 +337,7 @@ def prepare_control_image(
|
||||
resize_mode=resize_mode,
|
||||
h=height,
|
||||
w=width,
|
||||
# device=torch.device('cpu')
|
||||
device=device,
|
||||
device=torch.device(device),
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Unsupported resize_mode: '{resize_mode}'.")
|
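As a quick orientation to the helpers added above (heuristic_resize, clone_contiguous, np_img_to_torch), here is a minimal sketch of how a control image flows through them; the module path and the random input image are assumptions made for the example:

import numpy as np
import torch

# Assumed import path for the helpers shown in the diff above.
from invokeai.backend.image_util.controlnet_utils import clone_contiguous, heuristic_resize, np_img_to_torch

# Hypothetical 512x768 RGB control image (uint8, HWC layout).
np_img = np.random.randint(0, 256, size=(512, 768, 3), dtype=np.uint8)

resized = heuristic_resize(np_img, (1024, 640))  # target size is (width, height)
resized = clone_contiguous(resized)              # contiguous copy, as np_img_resize does
tensor = np_img_to_torch(resized, torch.device("cpu"))

print(resized.shape)  # (640, 1024, 3)
print(tensor.shape)   # torch.Size([1, 3, 640, 1024])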
@ -1,4 +1,4 @@
from typing import TYPE_CHECKING, Callable
from typing import TYPE_CHECKING, Callable, Optional

import torch
from PIL import Image
@ -13,8 +13,36 @@ if TYPE_CHECKING:
from invokeai.app.services.events.events_base import EventServiceBase
from invokeai.app.services.shared.invocation_context import InvocationContextData

# fast latents preview matrix for sdxl
# generated by @StAlKeR7779
SDXL_LATENT_RGB_FACTORS = [
# R G B
[0.3816, 0.4930, 0.5320],
[-0.3753, 0.1631, 0.1739],
[0.1770, 0.3588, -0.2048],
[-0.4350, -0.2644, -0.4289],
]
SDXL_SMOOTH_MATRIX = [
[0.0358, 0.0964, 0.0358],
[0.0964, 0.4711, 0.0964],
[0.0358, 0.0964, 0.0358],
]

def sample_to_lowres_estimated_image(samples, latent_rgb_factors, smooth_matrix=None):
# origingally adapted from code by @erucipe and @keturn here:
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7
# these updated numbers for v1.5 are from @torridgristle
SD1_5_LATENT_RGB_FACTORS = [
# R G B
[0.3444, 0.1385, 0.0670],  # L1
[0.1247, 0.4027, 0.1494],  # L2
[-0.3192, 0.2513, 0.2103],  # L3
[-0.1307, -0.1874, -0.7445],  # L4
]


def sample_to_lowres_estimated_image(
samples: torch.Tensor, latent_rgb_factors: torch.Tensor, smooth_matrix: Optional[torch.Tensor] = None
):
latent_image = samples[0].permute(1, 2, 0) @ latent_rgb_factors

if smooth_matrix is not None:
@ -47,64 +75,12 @@ def stable_diffusion_step_callback(
else:
sample = intermediate_state.latents

# TODO: This does not seem to be needed any more?
# # txt2img provides a Tensor in the step_callback
# # img2img provides a PipelineIntermediateState
# if isinstance(sample, PipelineIntermediateState):
# # this was an img2img
# print('img2img')
# latents = sample.latents
# step = sample.step
# else:
# print('txt2img')
# latents = sample
# step = intermediate_state.step

# TODO: only output a preview image when requested

if base_model in [BaseModelType.StableDiffusionXL, BaseModelType.StableDiffusionXLRefiner]:
# fast latents preview matrix for sdxl
# generated by @StAlKeR7779
sdxl_latent_rgb_factors = torch.tensor(
[
# R G B
[0.3816, 0.4930, 0.5320],
[-0.3753, 0.1631, 0.1739],
[0.1770, 0.3588, -0.2048],
[-0.4350, -0.2644, -0.4289],
],
dtype=sample.dtype,
device=sample.device,
)

sdxl_smooth_matrix = torch.tensor(
[
[0.0358, 0.0964, 0.0358],
[0.0964, 0.4711, 0.0964],
[0.0358, 0.0964, 0.0358],
],
dtype=sample.dtype,
device=sample.device,
)

sdxl_latent_rgb_factors = torch.tensor(SDXL_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
sdxl_smooth_matrix = torch.tensor(SDXL_SMOOTH_MATRIX, dtype=sample.dtype, device=sample.device)
image = sample_to_lowres_estimated_image(sample, sdxl_latent_rgb_factors, sdxl_smooth_matrix)
else:
# origingally adapted from code by @erucipe and @keturn here:
# https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/7

# these updated numbers for v1.5 are from @torridgristle
v1_5_latent_rgb_factors = torch.tensor(
[
# R G B
[0.3444, 0.1385, 0.0670],  # L1
[0.1247, 0.4027, 0.1494],  # L2
[-0.3192, 0.2513, 0.2103],  # L3
[-0.1307, -0.1874, -0.7445],  # L4
],
dtype=sample.dtype,
device=sample.device,
)

v1_5_latent_rgb_factors = torch.tensor(SD1_5_LATENT_RGB_FACTORS, dtype=sample.dtype, device=sample.device)
image = sample_to_lowres_estimated_image(sample, v1_5_latent_rgb_factors)

(width, height) = image.size
@ -113,15 +89,9 @@ def stable_diffusion_step_callback(

dataURL = image_to_dataURL(image, image_format="JPEG")

events.emit_generator_progress(
queue_id=context_data.queue_item.queue_id,
queue_item_id=context_data.queue_item.item_id,
queue_batch_id=context_data.queue_item.batch_id,
graph_execution_state_id=context_data.queue_item.session_id,
node_id=context_data.invocation.id,
source_node_id=context_data.source_invocation_id,
progress_image=ProgressImage(width=width, height=height, dataURL=dataURL),
step=intermediate_state.step,
order=intermediate_state.order,
total_steps=intermediate_state.total_steps,
events.emit_invocation_denoise_progress(
context_data.queue_item,
context_data.invocation,
intermediate_state,
ProgressImage(dataURL=dataURL, width=width, height=height),
)
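To make the fast-preview math above concrete, here is a small sketch (shapes and module path are invented for the example) of turning an SDXL latent batch into a rough RGB preview using the new module-level constants:

import torch

# Hypothetical 4-channel latent at 1/8 resolution (a 1024x1024 image -> 128x128 latent).
samples = torch.randn(1, 4, 128, 128)

factors = torch.tensor(SDXL_LATENT_RGB_FACTORS, dtype=samples.dtype, device=samples.device)
smooth = torch.tensor(SDXL_SMOOTH_MATRIX, dtype=samples.dtype, device=samples.device)

# (4, 128, 128) latent -> (128, 128, 3) RGB estimate via the 4x3 factor matrix, then lightly
# smoothed; the step callback JPEG-encodes the resulting preview image for the progress event.
preview = sample_to_lowres_estimated_image(samples, factors, smooth)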
@ -4,5 +4,4 @@ Initialization file for invokeai.backend.image_util methods.

from .infill_methods.patchmatch import PatchMatch  # noqa: F401
from .pngwriter import PngWriter, PromptFormatter, retrieve_metadata, write_metadata  # noqa: F401
from .seamless import configure_model_padding  # noqa: F401
from .util import InitImageResizer, make_grid  # noqa: F401
@ -13,7 +13,7 @@ from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.image_util.depth_anything.model.dpt import DPT_DINOv2
from invokeai.backend.image_util.depth_anything.utilities.util import NormalizeImage, PrepareForNet, Resize
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger

config = get_config()
@ -56,7 +56,7 @@ class DepthAnythingDetector:
def __init__(self) -> None:
self.model = None
self.model_size: Union[Literal["large", "base", "small"], None] = None
self.device = choose_torch_device()
self.device = TorchDevice.choose_torch_device()

def load_model(self, model_size: Literal["large", "base", "small"] = "small"):
DEPTH_ANYTHING_MODEL_PATH = config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"]
@ -81,7 +81,7 @@ class DepthAnythingDetector:
self.model.load_state_dict(torch.load(DEPTH_ANYTHING_MODEL_PATH.as_posix(), map_location="cpu"))
self.model.eval()

self.model.to(choose_torch_device())
self.model.to(self.device)
return self.model

def __call__(self, image: Image.Image, resolution: int = 512) -> Image.Image:
@ -94,7 +94,7 @@ class DepthAnythingDetector:

image_height, image_width = np_image.shape[:2]
np_image = transform({"image": np_image})["image"]
tensor_image = torch.from_numpy(np_image).unsqueeze(0).to(choose_torch_device())
tensor_image = torch.from_numpy(np_image).unsqueeze(0).to(self.device)

with torch.no_grad():
depth = self.model(tensor_image)
@ -7,7 +7,7 @@ import onnxruntime as ort

from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.devices import TorchDevice

from .onnxdet import inference_detector
from .onnxpose import inference_pose
@ -28,9 +28,9 @@ config = get_config()

class Wholebody:
def __init__(self):
device = choose_torch_device()
device = TorchDevice.choose_torch_device()

providers = ["CUDAExecutionProvider"] if device == "cuda" else ["CPUExecutionProvider"]
providers = ["CUDAExecutionProvider"] if device.type == "cuda" else ["CPUExecutionProvider"]

DET_MODEL_PATH = config.models_path / DWPOSE_MODELS["yolox_l.onnx"]["local"]
download_with_progress_bar("yolox_l.onnx", DWPOSE_MODELS["yolox_l.onnx"]["url"], DET_MODEL_PATH)
@ -8,7 +8,7 @@ from huggingface_hub import hf_hub_download
from PIL import Image

from invokeai.backend.image_util.util import (
non_maximum_suppression,
nms,
normalize_image_channel_count,
np_to_pil,
pil_to_np,
@ -134,7 +134,7 @@ class HEDProcessor:
detected_map = cv2.resize(detected_map, (width, height), interpolation=cv2.INTER_LINEAR)

if scribble:
detected_map = non_maximum_suppression(detected_map, 127, 3.0)
detected_map = nms(detected_map, 127, 3.0)
detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
detected_map[detected_map > 4] = 255
detected_map[detected_map < 255] = 0
@ -8,7 +8,7 @@ from PIL import Image
import invokeai.backend.util.logging as logger
from invokeai.app.services.config.config_default import get_config
from invokeai.app.util.download_with_progress import download_with_progress_bar
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.devices import TorchDevice


def norm_img(np_img):
@ -29,7 +29,7 @@ def load_jit_model(url_or_path, device):

class LaMA:
def __call__(self, input_image: Image.Image, *args: Any, **kwds: Any) -> Any:
device = choose_torch_device()
device = TorchDevice.choose_torch_device()
model_location = get_config().models_path / "core/misc/lama/lama.pt"

if not model_location.exists():
@ -11,7 +11,7 @@ from cv2.typing import MatLike
from tqdm import tqdm

from invokeai.backend.image_util.basicsr.rrdbnet_arch import RRDBNet
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.devices import TorchDevice

"""
Adapted from https://github.com/xinntao/Real-ESRGAN/blob/master/realesrgan/utils.py
@ -65,7 +65,7 @@ class RealESRGAN:
self.pre_pad = pre_pad
self.mod_scale: Optional[int] = None
self.half = half
self.device = choose_torch_device()
self.device = TorchDevice.choose_torch_device()

loadnet = torch.load(model_path, map_location=torch.device("cpu"))

@ -8,14 +8,15 @@ from pathlib import Path

import numpy as np
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from PIL import Image
from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor

import invokeai.backend.util.logging as logger
from invokeai.app.services.config.config_default import get_config
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.silence_warnings import SilenceWarnings

repo_id = "CompVis/stable-diffusion-safety-checker"
CHECKER_PATH = "core/convert/stable-diffusion-safety-checker"


@ -24,34 +25,34 @@ class SafetyChecker:
Wrapper around SafetyChecker model.
"""

safety_checker = None
feature_extractor = None
tried_load: bool = False
safety_checker = None

@classmethod
def _load_safety_checker(cls):
if cls.tried_load:
if cls.safety_checker is not None and cls.feature_extractor is not None:
return

try:
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(get_config().models_path / CHECKER_PATH)
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(get_config().models_path / CHECKER_PATH)
model_path = get_config().models_path / CHECKER_PATH
if model_path.exists():
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(model_path)
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(model_path)
else:
model_path.mkdir(parents=True, exist_ok=True)
cls.feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
cls.feature_extractor.save_pretrained(model_path, safe_serialization=True)
cls.safety_checker = StableDiffusionSafetyChecker.from_pretrained(repo_id)
cls.safety_checker.save_pretrained(model_path, safe_serialization=True)
except Exception as e:
logger.warning(f"Could not load NSFW checker: {str(e)}")
cls.tried_load = True

@classmethod
def safety_checker_available(cls) -> bool:
return Path(get_config().models_path, CHECKER_PATH).exists()

@classmethod
def has_nsfw_concept(cls, image: Image.Image) -> bool:
if not cls.safety_checker_available() and cls.tried_load:
return False
cls._load_safety_checker()
if cls.safety_checker is None or cls.feature_extractor is None:
return False
device = choose_torch_device()
device = TorchDevice.choose_torch_device()
features = cls.feature_extractor([image], return_tensors="pt")
features.to(device)
cls.safety_checker.to(device)
@ -60,3 +61,24 @@ class SafetyChecker:
with SilenceWarnings():
checked_image, has_nsfw_concept = cls.safety_checker(images=x_image, clip_input=features.pixel_values)
return has_nsfw_concept[0]

@classmethod
def blur_if_nsfw(cls, image: Image.Image) -> Image.Image:
if cls.has_nsfw_concept(image):
logger.warning("A potentially NSFW image has been detected. Image will be blurred.")
blurry_image = image.filter(filter=ImageFilter.GaussianBlur(radius=32))
caution = cls._get_caution_img()
# Center the caution image on the blurred image
x = (blurry_image.width - caution.width) // 2
y = (blurry_image.height - caution.height) // 2
blurry_image.paste(caution, (x, y), caution)
image = blurry_image

return image

@classmethod
def _get_caution_img(cls) -> Image.Image:
import invokeai.app.assets.images as image_assets

caution = Image.open(Path(image_assets.__path__[0]) / "caution.png")
return caution.resize((caution.width // 2, caution.height // 2))
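A minimal usage sketch for the new blur_if_nsfw classmethod shown above (the file paths and the import location are assumptions):

from PIL import Image

from invokeai.backend.image_util.safety_checker import SafetyChecker  # assumed module path

img = Image.open("some_generated_image.png")  # hypothetical file
# Returns the image unchanged when no NSFW concept is detected; otherwise a
# Gaussian-blurred copy with the half-size caution image pasted in the center.
safe_img = SafetyChecker.blur_if_nsfw(img)
safe_img.save("some_generated_image_checked.png")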
@ -1,52 +0,0 @@
import torch.nn as nn


def _conv_forward_asymmetric(self, input, weight, bias):
"""
Patch for Conv2d._conv_forward that supports asymmetric padding
"""
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
return nn.functional.conv2d(
working,
weight,
bias,
self.stride,
nn.modules.utils._pair(0),
self.dilation,
self.groups,
)


def configure_model_padding(model, seamless, seamless_axes):
"""
Modifies the 2D convolution layers to use a circular padding mode based on
the `seamless` and `seamless_axes` options.
"""
# TODO: get an explicit interface for this in diffusers: https://github.com/huggingface/diffusers/issues/556
for m in model.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
if seamless:
m.asymmetric_padding_mode = {}
m.asymmetric_padding = {}
m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
m.asymmetric_padding["x"] = (
m._reversed_padding_repeated_twice[0],
m._reversed_padding_repeated_twice[1],
0,
0,
)
m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
m.asymmetric_padding["y"] = (
0,
0,
m._reversed_padding_repeated_twice[2],
m._reversed_padding_repeated_twice[3],
)
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
else:
m._conv_forward = nn.Conv2d._conv_forward.__get__(m, nn.Conv2d)
if hasattr(m, "asymmetric_padding_mode"):
del m.asymmetric_padding_mode
if hasattr(m, "asymmetric_padding"):
del m.asymmetric_padding
@ -1,4 +1,5 @@
from math import ceil, floor, sqrt
from typing import Optional

import cv2
import numpy as np
@ -143,20 +144,21 @@ def resize_image_to_resolution(input_image: np.ndarray, resolution: int) -> np.n
h = float(input_image.shape[0])
w = float(input_image.shape[1])
scaling_factor = float(resolution) / min(h, w)
h *= scaling_factor
w *= scaling_factor
h = int(np.round(h / 64.0)) * 64
w = int(np.round(w / 64.0)) * 64
h = int(h * scaling_factor)
w = int(w * scaling_factor)
if scaling_factor > 1:
return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_LANCZOS4)
else:
return cv2.resize(input_image, (w, h), interpolation=cv2.INTER_AREA)


def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):
def nms(np_img: np.ndarray, threshold: Optional[int] = None, sigma: Optional[float] = None) -> np.ndarray:
"""
Apply non-maximum suppression to an image.

If both threshold and sigma are provided, the image will blurred before the suppression and thresholded afterwards,
resulting in a binary output image.

This function is adapted from https://github.com/lllyasviel/ControlNet.

Args:
@ -166,23 +168,36 @@ def non_maximum_suppression(image: np.ndarray, threshold: int, sigma: float):

Returns:
The image after non-maximum suppression.

Raises:
ValueError: If only one of threshold and sigma provided.
"""

image = cv2.GaussianBlur(image.astype(np.float32), (0, 0), sigma)
# Raise a value error if only one of threshold and sigma is provided
if (threshold is None) != (sigma is None):
raise ValueError("Both threshold and sigma must be provided if one is provided.")

if sigma is not None and threshold is not None:
# Blurring the image can help to thin out features
np_img = cv2.GaussianBlur(np_img.astype(np.float32), (0, 0), sigma)

filter_1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
filter_2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
filter_3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
filter_4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)

y = np.zeros_like(image)
nms_img = np.zeros_like(np_img)

for f in [filter_1, filter_2, filter_3, filter_4]:
np.putmask(y, cv2.dilate(image, kernel=f) == image, image)
np.putmask(nms_img, cv2.dilate(np_img, kernel=f) == np_img, np_img)

z = np.zeros_like(y, dtype=np.uint8)
z[y > threshold] = 255
return z
if sigma is not None and threshold is not None:
# We blurred - now threshold to get a binary image
thresholded = np.zeros_like(nms_img, dtype=np.uint8)
thresholded[nms_img > threshold] = 255
return thresholded

return nms_img


def safe_step(x: np.ndarray, step: int = 2) -> np.ndarray:
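A short sketch of the two ways the renamed nms helper above can be called (the edge map is fabricated for the example):

import numpy as np

from invokeai.backend.image_util.util import nms

# Hypothetical soft edge map, single channel, uint8.
edge_map = (np.random.rand(256, 256) * 255).astype(np.uint8)

# Plain non-maximum suppression: no blur, no threshold, non-binary output.
suppressed = nms(edge_map)

# Blur + suppress + threshold: binary 0/255 output, as used by the HED scribble path above.
binary = nms(edge_map, threshold=127, sigma=3.0)

# Passing only one of threshold/sigma raises ValueError.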
@ -301,12 +301,12 @@ class MainConfigBase(ModelConfigBase):
default_settings: Optional[MainModelDefaultSettings] = Field(
description="Default settings for this model", default=None
)
variant: ModelVariantType = ModelVariantType.Normal


class MainCheckpointConfig(CheckpointConfigBase, MainConfigBase):
"""Model config for main checkpoint models."""

variant: ModelVariantType = ModelVariantType.Normal
prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon
upcast_attention: bool = False

@ -18,7 +18,7 @@ from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoad
from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase, ModelLockerBase
from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data, calc_model_size_by_fs
from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
from invokeai.backend.util.devices import choose_torch_device, torch_dtype
from invokeai.backend.util.devices import TorchDevice


# TO DO: The loader is not thread safe!
@ -37,7 +37,7 @@ class ModelLoader(ModelLoaderBase):
self._logger = logger
self._ram_cache = ram_cache
self._convert_cache = convert_cache
self._torch_dtype = torch_dtype(choose_torch_device())
self._torch_dtype = TorchDevice.choose_torch_dtype()

def load_model(self, model_config: AnyModelConfig, submodel_type: Optional[SubModelType] = None) -> LoadedModel:
"""
@ -30,15 +30,12 @@ import torch

from invokeai.backend.model_manager import AnyModel, SubModelType
from invokeai.backend.model_manager.load.memory_snapshot import MemorySnapshot, get_pretty_snapshot_diff
from invokeai.backend.util.devices import choose_torch_device
from invokeai.backend.util.devices import TorchDevice
from invokeai.backend.util.logging import InvokeAILogger

from .model_cache_base import CacheRecord, CacheStats, ModelCacheBase, ModelLockerBase
from .model_locker import ModelLocker

if choose_torch_device() == torch.device("mps"):
from torch import mps

# Maximum size of the cache, in gigs
# Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
DEFAULT_MAX_CACHE_SIZE = 6.0
@ -244,9 +241,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
f"Removing {cache_entry.key} from VRAM to free {(cache_entry.size/GIG):.2f}GB; vram free = {(torch.cuda.memory_allocated()/GIG):.2f}GB"
)

torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
TorchDevice.empty_cache()

def move_model_to_device(self, cache_entry: CacheRecord[AnyModel], target_device: torch.device) -> None:
"""Move model into the indicated device.
@ -416,10 +411,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
self.stats.cleared = models_cleared
gc.collect()

torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()

TorchDevice.empty_cache()
self.logger.debug(f"After making room: cached_models={len(self._cached_models)}")

def _delete_cache_entry(self, cache_entry: CacheRecord[AnyModel]) -> None:
@ -17,7 +17,7 @@ from diffusers.utils import logging as dlogging

from invokeai.app.services.model_install import ModelInstallServiceBase
from invokeai.app.services.model_records.model_records_base import ModelRecordChanges
from invokeai.backend.util.devices import choose_torch_device, torch_dtype
from invokeai.backend.util.devices import TorchDevice

from . import (
AnyModelConfig,
@ -43,6 +43,7 @@ class ModelMerger(object):
Initialize a ModelMerger object with the model installer.
"""
self._installer = installer
self._dtype = TorchDevice.choose_torch_dtype()

def merge_diffusion_models(
self,
@ -68,7 +69,7 @@ class ModelMerger(object):
warnings.simplefilter("ignore")
verbosity = dlogging.get_verbosity()
dlogging.set_verbosity_error()
dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
dtype = torch.float16 if variant == "fp16" else self._dtype

# Note that checkpoint_merger will not work with downloaded HuggingFace fp16 models
# until upstream https://github.com/huggingface/diffusers/pull/6670 is merged and released.
@ -151,7 +152,7 @@ class ModelMerger(object):
dump_path.mkdir(parents=True, exist_ok=True)
dump_path = dump_path / merged_model_name

dtype = torch.float16 if variant == "fp16" else torch_dtype(choose_torch_device())
dtype = torch.float16 if variant == "fp16" else self._dtype
merged_pipe.save_pretrained(dump_path.as_posix(), safe_serialization=True, torch_dtype=dtype, variant=variant)

# register model and get its unique key
@ -51,6 +51,7 @@ LEGACY_CONFIGS: Dict[BaseModelType, Dict[ModelVariantType, Union[str, Dict[Sched
},
BaseModelType.StableDiffusionXL: {
ModelVariantType.Normal: "sd_xl_base.yaml",
ModelVariantType.Inpaint: "sd_xl_inpaint.yaml",
},
BaseModelType.StableDiffusionXLRefiner: {
ModelVariantType.Normal: "sd_xl_refiner.yaml",
@ -155,7 +155,7 @@ STARTER_MODELS: list[StarterModel] = [
StarterModel(
name="IP Adapter",
base=BaseModelType.StableDiffusion1,
source="InvokeAI/ip_adapter_sd15",
source="https://huggingface.co/InvokeAI/ip_adapter_sd15/resolve/main/ip-adapter_sd15.safetensors",
description="IP-Adapter for SD 1.5 models",
type=ModelType.IPAdapter,
dependencies=[ip_adapter_sd_image_encoder],
@ -163,7 +163,7 @@ STARTER_MODELS: list[StarterModel] = [
StarterModel(
name="IP Adapter Plus",
base=BaseModelType.StableDiffusion1,
source="InvokeAI/ip_adapter_plus_sd15",
source="https://huggingface.co/InvokeAI/ip_adapter_plus_sd15/resolve/main/ip-adapter-plus_sd15.safetensors",
description="Refined IP-Adapter for SD 1.5 models",
type=ModelType.IPAdapter,
dependencies=[ip_adapter_sd_image_encoder],
@ -171,7 +171,7 @@ STARTER_MODELS: list[StarterModel] = [
StarterModel(
name="IP Adapter Plus Face",
base=BaseModelType.StableDiffusion1,
source="InvokeAI/ip_adapter_plus_face_sd15",
source="https://huggingface.co/InvokeAI/ip_adapter_plus_face_sd15/resolve/main/ip-adapter-plus-face_sd15.safetensors",
description="Refined IP-Adapter for SD 1.5 models, adapted for faces",
type=ModelType.IPAdapter,
dependencies=[ip_adapter_sd_image_encoder],
@ -179,7 +179,7 @@ STARTER_MODELS: list[StarterModel] = [
StarterModel(
name="IP Adapter SDXL",
base=BaseModelType.StableDiffusionXL,
source="InvokeAI/ip_adapter_sdxl",
source="https://huggingface.co/InvokeAI/ip_adapter_sdxl_vit_h/resolve/main/ip-adapter_sdxl_vit-h.safetensors",
description="IP-Adapter for SDXL models",
type=ModelType.IPAdapter,
dependencies=[ip_adapter_sdxl_image_encoder],
@ -21,14 +21,11 @@ from pydantic import Field
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from invokeai.app.services.config.config_default import get_config
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import (
IPAdapterData,
TextConditioningData,
)
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import IPAdapterData, TextConditioningData
from invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
from invokeai.backend.stable_diffusion.diffusion.unet_attention_patcher import UNetAttentionPatcher
from invokeai.backend.stable_diffusion.diffusion.unet_attention_patcher import UNetAttentionPatcher, UNetIPAdapterData
from invokeai.backend.util.attention import auto_detect_slice_size
from invokeai.backend.util.devices import normalize_device
from invokeai.backend.util.devices import TorchDevice


@dataclass
@ -258,7 +255,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
if self.unet.device.type == "cpu" or self.unet.device.type == "mps":
mem_free = psutil.virtual_memory().free
elif self.unet.device.type == "cuda":
mem_free, _ = torch.cuda.mem_get_info(normalize_device(self.unet.device))
mem_free, _ = torch.cuda.mem_get_info(TorchDevice.normalize(self.unet.device))
else:
raise ValueError(f"unrecognized device {self.unet.device}")
# input tensor of [1, 4, h/8, w/8]
@ -394,8 +391,13 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
unet_attention_patcher = None
self.use_ip_adapter = use_ip_adapter
attn_ctx = nullcontext()

if use_ip_adapter or use_regional_prompting:
ip_adapters = [ipa.ip_adapter_model for ipa in ip_adapter_data] if use_ip_adapter else None
ip_adapters: Optional[List[UNetIPAdapterData]] = (
[{"ip_adapter": ipa.ip_adapter_model, "target_blocks": ipa.target_blocks} for ipa in ip_adapter_data]
if use_ip_adapter
else None
)
unet_attention_patcher = UNetAttentionPatcher(ip_adapters)
attn_ctx = unet_attention_patcher.apply_ip_adapter_attention(self.invokeai_diffuser.model)

@ -53,6 +53,7 @@ class IPAdapterData:
ip_adapter_model: IPAdapter
ip_adapter_conditioning: IPAdapterConditioningInfo
mask: torch.Tensor
target_blocks: List[str]

# Either a single weight applied to all steps, or a list of weights for each step.
weight: Union[float, List[float]] = 1.0
@ -1,4 +1,5 @@
from typing import Optional
from dataclasses import dataclass
from typing import List, Optional, cast

import torch
import torch.nn.functional as F
@ -9,6 +10,12 @@ from invokeai.backend.stable_diffusion.diffusion.regional_ip_data import Regiona
from invokeai.backend.stable_diffusion.diffusion.regional_prompt_data import RegionalPromptData


@dataclass
class IPAdapterAttentionWeights:
ip_adapter_weights: IPAttentionProcessorWeights
skip: bool


class CustomAttnProcessor2_0(AttnProcessor2_0):
"""A custom implementation of AttnProcessor2_0 that supports additional Invoke features.
This implementation is based on
@ -20,7 +27,7 @@ class CustomAttnProcessor2_0(AttnProcessor2_0):

def __init__(
self,
ip_adapter_weights: Optional[list[IPAttentionProcessorWeights]] = None,
ip_adapter_attention_weights: Optional[List[IPAdapterAttentionWeights]] = None,
):
"""Initialize a CustomAttnProcessor2_0.
Note: Arguments that are the same for all attention layers are passed to __call__(). Arguments that are
@ -30,23 +37,22 @@ class CustomAttnProcessor2_0(AttnProcessor2_0):
for the i'th IP-Adapter.
"""
super().__init__()
self._ip_adapter_weights = ip_adapter_weights

def _is_ip_adapter_enabled(self) -> bool:
return self._ip_adapter_weights is not None
self._ip_adapter_attention_weights = ip_adapter_attention_weights

def __call__(
self,
attn: Attention,
hidden_states: torch.FloatTensor,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
temb: Optional[torch.FloatTensor] = None,
# For regional prompting:
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
temb: Optional[torch.Tensor] = None,
# For Regional Prompting:
regional_prompt_data: Optional[RegionalPromptData] = None,
percent_through: Optional[torch.FloatTensor] = None,
percent_through: Optional[torch.Tensor] = None,
# For IP-Adapter:
regional_ip_data: Optional[RegionalIPData] = None,
*args,
**kwargs,
) -> torch.FloatTensor:
"""Apply attention.
Args:
@ -130,17 +136,19 @@ class CustomAttnProcessor2_0(AttnProcessor2_0):

# Apply IP-Adapter conditioning.
if is_cross_attention:
if self._is_ip_adapter_enabled():
if self._ip_adapter_attention_weights:
assert regional_ip_data is not None
ip_masks = regional_ip_data.get_masks(query_seq_len=query_seq_len)

assert (
len(regional_ip_data.image_prompt_embeds)
== len(self._ip_adapter_weights)
== len(self._ip_adapter_attention_weights)
== len(regional_ip_data.scales)
== ip_masks.shape[1]
)

for ipa_index, ipa_embed in enumerate(regional_ip_data.image_prompt_embeds):
ipa_weights = self._ip_adapter_weights[ipa_index]
ipa_weights = self._ip_adapter_attention_weights[ipa_index].ip_adapter_weights
ipa_scale = regional_ip_data.scales[ipa_index]
ip_mask = ip_masks[0, ipa_index, ...]

@ -153,29 +161,33 @@ class CustomAttnProcessor2_0(AttnProcessor2_0):

# Expected ip_hidden_state shape: (batch_size, num_ip_images, ip_seq_len, ip_image_embedding)

ip_key = ipa_weights.to_k_ip(ip_hidden_states)
ip_value = ipa_weights.to_v_ip(ip_hidden_states)
if not self._ip_adapter_attention_weights[ipa_index].skip:
ip_key = ipa_weights.to_k_ip(ip_hidden_states)
ip_value = ipa_weights.to_v_ip(ip_hidden_states)

# Expected ip_key and ip_value shape: (batch_size, num_ip_images, ip_seq_len, head_dim * num_heads)
# Expected ip_key and ip_value shape:
# (batch_size, num_ip_images, ip_seq_len, head_dim * num_heads)

ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

# Expected ip_key and ip_value shape: (batch_size, num_heads, num_ip_images * ip_seq_len, head_dim)
# Expected ip_key and ip_value shape:
# (batch_size, num_heads, num_ip_images * ip_seq_len, head_dim)

# TODO: add support for attn.scale when we move to Torch 2.1
ip_hidden_states = F.scaled_dot_product_attention(
query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
)
# TODO: add support for attn.scale when we move to Torch 2.1
ip_hidden_states = F.scaled_dot_product_attention(
query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
)

# Expected ip_hidden_states shape: (batch_size, num_heads, query_seq_len, head_dim)
# Expected ip_hidden_states shape: (batch_size, num_heads, query_seq_len, head_dim)
ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(
batch_size, -1, attn.heads * head_dim
)

ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
ip_hidden_states = ip_hidden_states.to(query.dtype)
ip_hidden_states = ip_hidden_states.to(query.dtype)

# Expected ip_hidden_states shape: (batch_size, query_seq_len, num_heads * head_dim)

hidden_states = hidden_states + ipa_scale * ip_hidden_states * ip_mask
# Expected ip_hidden_states shape: (batch_size, query_seq_len, num_heads * head_dim)
hidden_states = hidden_states + ipa_scale * ip_hidden_states * ip_mask
else:
# If IP-Adapter is not enabled, then regional_ip_data should not be passed in.
assert regional_ip_data is None
@ -188,11 +200,15 @@ class CustomAttnProcessor2_0(AttnProcessor2_0):
hidden_states = attn.to_out[1](hidden_states)

if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

if attn.residual_connection:
hidden_states = hidden_states + residual

hidden_states = hidden_states / attn.rescale_output_factor
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# End of unmodified block from AttnProcessor2_0

return hidden_states
# casting torch.Tensor to torch.FloatTensor to avoid type issues
return cast(torch.FloatTensor, hidden_states)
@ -1,17 +1,25 @@
from contextlib import contextmanager
from typing import Optional
from typing import List, Optional, TypedDict

from diffusers.models import UNet2DConditionModel

from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
from invokeai.backend.stable_diffusion.diffusion.custom_atttention import CustomAttnProcessor2_0
from invokeai.backend.stable_diffusion.diffusion.custom_atttention import (
CustomAttnProcessor2_0,
IPAdapterAttentionWeights,
)


class UNetIPAdapterData(TypedDict):
ip_adapter: IPAdapter
target_blocks: List[str]


class UNetAttentionPatcher:
"""A class for patching a UNet with CustomAttnProcessor2_0 attention layers."""

def __init__(self, ip_adapters: Optional[list[IPAdapter]]):
self._ip_adapters = ip_adapters
def __init__(self, ip_adapter_data: Optional[List[UNetIPAdapterData]]):
self._ip_adapters = ip_adapter_data

def _prepare_attention_processors(self, unet: UNet2DConditionModel):
"""Prepare a dict of attention processors that can be injected into a unet, and load the IP-Adapter attention
@ -26,9 +34,22 @@ class UNetAttentionPatcher:
attn_procs[name] = CustomAttnProcessor2_0()
else:
# Collect the weights from each IP Adapter for the idx'th attention processor.
attn_procs[name] = CustomAttnProcessor2_0(
[ip_adapter.attn_weights.get_attention_processor_weights(idx) for ip_adapter in self._ip_adapters],
)
ip_adapter_attention_weights_collection: list[IPAdapterAttentionWeights] = []

for ip_adapter in self._ip_adapters:
ip_adapter_weights = ip_adapter["ip_adapter"].attn_weights.get_attention_processor_weights(idx)
skip = True
for block in ip_adapter["target_blocks"]:
if block in name:
skip = False
break
ip_adapter_attention_weights: IPAdapterAttentionWeights = IPAdapterAttentionWeights(
ip_adapter_weights=ip_adapter_weights, skip=skip
)
ip_adapter_attention_weights_collection.append(ip_adapter_attention_weights)

attn_procs[name] = CustomAttnProcessor2_0(ip_adapter_attention_weights_collection)

return attn_procs

@contextmanager
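For orientation, a rough sketch of how the per-block targeting above wires together; the loaded IPAdapter, the UNet, and the example target_blocks strings are placeholders, and apply_ip_adapter_attention is assumed to behave as the context manager used in the pipeline code elsewhere in this diff:

from invokeai.backend.stable_diffusion.diffusion.unet_attention_patcher import (
    UNetAttentionPatcher,
    UNetIPAdapterData,
)

# `ip_adapter` (IPAdapter) and `unet` (UNet2DConditionModel) are assumed to be loaded already.
ip_adapter_data: list[UNetIPAdapterData] = [
    {
        "ip_adapter": ip_adapter,
        # Only attention layers whose module name contains one of these substrings
        # get skip=False in their IPAdapterAttentionWeights; all others are skipped.
        "target_blocks": ["down_blocks.2", "up_blocks.1"],  # illustrative values
    }
]

patcher = UNetAttentionPatcher(ip_adapter_data)
with patcher.apply_ip_adapter_attention(unet):
    ...  # run denoising with the patched attention processors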
@ -13,6 +13,7 @@ from diffusers import (
LCMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
TCDScheduler,
UniPCMultistepScheduler,
)

@ -40,4 +41,5 @@ SCHEDULER_MAP = {
"dpmpp_sde_k": (DPMSolverSDEScheduler, {"use_karras_sigmas": True, "noise_sampler_seed": 0}),
"unipc": (UniPCMultistepScheduler, {"cpu_only": True}),
"lcm": (LCMScheduler, {}),
"tcd": (TCDScheduler, {}),
}
@ -1,89 +1,51 @@
from __future__ import annotations

from contextlib import contextmanager
from typing import Callable, List, Union
from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
from diffusers.models.lora import LoRACompatibleConv
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel


def _conv_forward_asymmetric(self, input, weight, bias):
"""
Patch for Conv2d._conv_forward that supports asymmetric padding
"""
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
return nn.functional.conv2d(
working,
weight,
bias,
self.stride,
nn.modules.utils._pair(0),
self.dilation,
self.groups,
)


@contextmanager
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL, AutoencoderTiny], seamless_axes: List[str]):
if not seamless_axes:
yield
return

# Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
# override conv_forward
# https://github.com/huggingface/diffusers/issues/556#issuecomment-1993287019
def _conv_forward_asymmetric(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
return torch.nn.functional.conv2d(
working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups
)

original_layers: List[Tuple[nn.Conv2d, Callable]] = []

try:
# Hard coded to skip down block layers, allowing for seamless tiling at the expense of prompt adherence
skipped_layers = 1
for m_name, m in model.named_modules():
if not isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
continue
x_mode = "circular" if "x" in seamless_axes else "constant"
y_mode = "circular" if "y" in seamless_axes else "constant"

if isinstance(model, UNet2DConditionModel) and m_name.startswith("down_blocks.") and ".resnets." in m_name:
# down_blocks.1.resnets.1.conv1
_, block_num, _, resnet_num, submodule_name = m_name.split(".")
block_num = int(block_num)
resnet_num = int(resnet_num)
conv_layers: List[torch.nn.Conv2d] = []

if block_num >= len(model.down_blocks) - skipped_layers:
continue
for module in model.modules():
if isinstance(module, torch.nn.Conv2d):
conv_layers.append(module)

# Skip the second resnet (could be configurable)
if resnet_num > 0:
continue

# Skip Conv2d layers (could be configurable)
if submodule_name == "conv2":
continue

m.asymmetric_padding_mode = {}
m.asymmetric_padding = {}
m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
m.asymmetric_padding["x"] = (
m._reversed_padding_repeated_twice[0],
m._reversed_padding_repeated_twice[1],
0,
0,
)
m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
m.asymmetric_padding["y"] = (
0,
0,
m._reversed_padding_repeated_twice[2],
m._reversed_padding_repeated_twice[3],
)

to_restore.append((m, m._conv_forward))
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
for layer in conv_layers:
if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
layer.lora_layer = lambda *x: 0
original_layers.append((layer, layer._conv_forward))
layer._conv_forward = _conv_forward_asymmetric.__get__(layer, torch.nn.Conv2d)

yield

finally:
for module, orig_conv_forward in to_restore:
module._conv_forward = orig_conv_forward
if hasattr(module, "asymmetric_padding_mode"):
del module.asymmetric_padding_mode
if hasattr(module, "asymmetric_padding"):
del module.asymmetric_padding
for layer, orig_conv_forward in original_layers:
layer._conv_forward = orig_conv_forward
||||
|
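A minimal usage sketch for the set_seamless context manager changed above: inside the block every Conv2d's _conv_forward is swapped for a version that pads circularly along the selected axes, and the originals are restored on exit. The import path is assumed from the InvokeAI source tree and the bare UNet2DConditionModel() is only a placeholder for a loaded model:

# Illustrative usage of set_seamless; the UNet construction is a stand-in
# for however the caller obtained its model.
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel

from invokeai.backend.stable_diffusion.seamless import set_seamless

unet = UNet2DConditionModel()  # placeholder; normally loaded from a checkpoint

# Inside the block, convolutions pad circularly along "x", so decoded images
# tile horizontally; on exit the original _conv_forward methods are restored.
with set_seamless(unet, seamless_axes=["x"]):
    ...  # run denoising / decoding here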
@ -2,7 +2,6 @@
Initialization file for invokeai.backend.util
"""

from .devices import choose_precision, choose_torch_device
from .logging import InvokeAILogger
from .util import GIG, Chdir, directory_size

@ -11,6 +10,4 @@ __all__ = [
    "directory_size",
    "Chdir",
    "InvokeAILogger",
    "choose_precision",
    "choose_torch_device",
]
29
invokeai/backend/util/catch_sigint.py
Normal file
@ -0,0 +1,29 @@
"""
This module defines a context manager `catch_sigint()` which temporarily replaces
the SIGINT handler installed by the ASGI server so that the user can ^C the application
and shut it down immediately. This was implemented in order to allow the user to interrupt
slow model hashing during startup.

Use like this:

    from invokeai.backend.util.catch_sigint import catch_sigint
    with catch_sigint():
        run_some_hard_to_interrupt_process()
"""

import signal
from contextlib import contextmanager
from typing import Generator


def sigint_handler(signum, frame):  # type: ignore
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.raise_signal(signal.SIGINT)


@contextmanager
def catch_sigint() -> Generator[None, None, None]:
    original_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, sigint_handler)
    yield
    signal.signal(signal.SIGINT, original_handler)
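A short sketch of what catch_sigint guards in practice; hash_model_weights below is a hypothetical stand-in for the slow startup work (such as model hashing) that should remain interruptible:

# Illustrative usage; hash_model_weights is hypothetical, not part of this diff.
import hashlib
from pathlib import Path

from invokeai.backend.util.catch_sigint import catch_sigint


def hash_model_weights(path: Path) -> str:
    """Hypothetical slow call: hash a large file in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()


# While inside the block, Ctrl-C falls back to the default handler and stops the
# process immediately; the previous handler is reinstated when the block exits.
with catch_sigint():
    print(hash_model_weights(Path("model.safetensors")))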
@ -1,89 +1,110 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import nullcontext
|
||||
from typing import Literal, Optional, Union
|
||||
from typing import Dict, Literal, Optional, Union
|
||||
|
||||
import torch
|
||||
from torch import autocast
|
||||
from deprecated import deprecated
|
||||
|
||||
from invokeai.app.services.config.config_default import PRECISION, get_config
|
||||
from invokeai.app.services.config.config_default import get_config
|
||||
|
||||
# legacy APIs
|
||||
TorchPrecisionNames = Literal["float32", "float16", "bfloat16"]
|
||||
CPU_DEVICE = torch.device("cpu")
|
||||
CUDA_DEVICE = torch.device("cuda")
|
||||
MPS_DEVICE = torch.device("mps")
|
||||
|
||||
|
||||
@deprecated("Use TorchDevice.choose_torch_dtype() instead.") # type: ignore
|
||||
def choose_precision(device: torch.device) -> TorchPrecisionNames:
|
||||
"""Return the string representation of the recommended torch device."""
|
||||
torch_dtype = TorchDevice.choose_torch_dtype(device)
|
||||
return PRECISION_TO_NAME[torch_dtype]
|
||||
|
||||
|
||||
@deprecated("Use TorchDevice.choose_torch_device() instead.") # type: ignore
|
||||
def choose_torch_device() -> torch.device:
|
||||
"""Convenience routine for guessing which GPU device to run model on"""
|
||||
config = get_config()
|
||||
if config.device == "auto":
|
||||
if torch.cuda.is_available():
|
||||
return torch.device("cuda")
|
||||
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
|
||||
return torch.device("mps")
|
||||
"""Return the torch.device to use for accelerated inference."""
|
||||
return TorchDevice.choose_torch_device()
|
||||
|
||||
|
||||
@deprecated("Use TorchDevice.choose_torch_dtype() instead.") # type: ignore
|
||||
def torch_dtype(device: torch.device) -> torch.dtype:
|
||||
"""Return the torch precision for the recommended torch device."""
|
||||
return TorchDevice.choose_torch_dtype(device)
|
||||
|
||||
|
||||
NAME_TO_PRECISION: Dict[TorchPrecisionNames, torch.dtype] = {
|
||||
"float32": torch.float32,
|
||||
"float16": torch.float16,
|
||||
"bfloat16": torch.bfloat16,
|
||||
}
|
||||
PRECISION_TO_NAME: Dict[torch.dtype, TorchPrecisionNames] = {v: k for k, v in NAME_TO_PRECISION.items()}
|
||||
|
||||
|
||||
class TorchDevice:
|
||||
"""Abstraction layer for torch devices."""
|
||||
|
||||
@classmethod
|
||||
def choose_torch_device(cls) -> torch.device:
|
||||
"""Return the torch.device to use for accelerated inference."""
|
||||
app_config = get_config()
|
||||
if app_config.device != "auto":
|
||||
device = torch.device(app_config.device)
|
||||
elif torch.cuda.is_available():
|
||||
device = CUDA_DEVICE
|
||||
elif torch.backends.mps.is_available():
|
||||
device = MPS_DEVICE
|
||||
else:
|
||||
return CPU_DEVICE
|
||||
else:
|
||||
return torch.device(config.device)
|
||||
device = CPU_DEVICE
|
||||
return cls.normalize(device)
|
||||
|
||||
@classmethod
|
||||
def choose_torch_dtype(cls, device: Optional[torch.device] = None) -> torch.dtype:
|
||||
"""Return the precision to use for accelerated inference."""
|
||||
device = device or cls.choose_torch_device()
|
||||
config = get_config()
|
||||
if device.type == "cuda" and torch.cuda.is_available():
|
||||
device_name = torch.cuda.get_device_name(device)
|
||||
if "GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name:
|
||||
# These GPUs have limited support for float16
|
||||
return cls._to_dtype("float32")
|
||||
elif config.precision == "auto":
|
||||
# Default to float16 for CUDA devices
|
||||
return cls._to_dtype("float16")
|
||||
else:
|
||||
# Use the user-defined precision
|
||||
return cls._to_dtype(config.precision)
|
||||
|
||||
def get_torch_device_name() -> str:
|
||||
device = choose_torch_device()
|
||||
return torch.cuda.get_device_name(device) if device.type == "cuda" else device.type.upper()
|
||||
elif device.type == "mps" and torch.backends.mps.is_available():
|
||||
if config.precision == "auto":
|
||||
# Default to float16 for MPS devices
|
||||
return cls._to_dtype("float16")
|
||||
else:
|
||||
# Use the user-defined precision
|
||||
return cls._to_dtype(config.precision)
|
||||
# CPU / safe fallback
|
||||
return cls._to_dtype("float32")
|
||||
|
||||
@classmethod
|
||||
def get_torch_device_name(cls) -> str:
|
||||
"""Return the device name for the current torch device."""
|
||||
device = cls.choose_torch_device()
|
||||
return torch.cuda.get_device_name(device) if device.type == "cuda" else device.type.upper()
|
||||
|
||||
def choose_precision(device: torch.device) -> Literal["float32", "float16", "bfloat16"]:
|
||||
"""Return an appropriate precision for the given torch device."""
|
||||
app_config = get_config()
|
||||
if device.type == "cuda":
|
||||
device_name = torch.cuda.get_device_name(device)
|
||||
if "GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name:
|
||||
# These GPUs have limited support for float16
|
||||
return "float32"
|
||||
elif app_config.precision == "auto" or app_config.precision == "autocast":
|
||||
# Default to float16 for CUDA devices
|
||||
return "float16"
|
||||
else:
|
||||
# Use the user-defined precision
|
||||
return app_config.precision
|
||||
elif device.type == "mps":
|
||||
if app_config.precision == "auto" or app_config.precision == "autocast":
|
||||
# Default to float16 for MPS devices
|
||||
return "float16"
|
||||
else:
|
||||
# Use the user-defined precision
|
||||
return app_config.precision
|
||||
# CPU / safe fallback
|
||||
return "float32"
|
||||
|
||||
|
||||
def torch_dtype(device: Optional[torch.device] = None) -> torch.dtype:
|
||||
device = device or choose_torch_device()
|
||||
precision = choose_precision(device)
|
||||
if precision == "float16":
|
||||
return torch.float16
|
||||
if precision == "bfloat16":
|
||||
return torch.bfloat16
|
||||
else:
|
||||
# "auto", "autocast", "float32"
|
||||
return torch.float32
|
||||
|
||||
|
||||
def choose_autocast(precision: PRECISION):
|
||||
"""Returns an autocast context or nullcontext for the given precision string"""
|
||||
# float16 currently requires autocast to avoid errors like:
|
||||
# 'expected scalar type Half but found Float'
|
||||
if precision == "autocast" or precision == "float16":
|
||||
return autocast
|
||||
return nullcontext
|
||||
|
||||
|
||||
def normalize_device(device: Union[str, torch.device]) -> torch.device:
|
||||
"""Ensure device has a device index defined, if appropriate."""
|
||||
device = torch.device(device)
|
||||
if device.index is None:
|
||||
# cuda might be the only torch backend that currently uses the device index?
|
||||
# I don't see anything like `current_device` for cpu or mps.
|
||||
if device.type == "cuda":
|
||||
@classmethod
|
||||
def normalize(cls, device: Union[str, torch.device]) -> torch.device:
|
||||
"""Add the device index to CUDA devices."""
|
||||
device = torch.device(device)
|
||||
if device.index is None and device.type == "cuda" and torch.cuda.is_available():
|
||||
device = torch.device(device.type, torch.cuda.current_device())
|
||||
return device
|
||||
return device
|
||||
|
||||
@classmethod
|
||||
def empty_cache(cls) -> None:
|
||||
"""Clear the GPU device cache."""
|
||||
if torch.backends.mps.is_available():
|
||||
torch.mps.empty_cache()
|
||||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
@classmethod
|
||||
def _to_dtype(cls, precision_name: TorchPrecisionNames) -> torch.dtype:
|
||||
return NAME_TO_PRECISION[precision_name]
|
||||
|
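A short sketch of how the new TorchDevice abstraction and the deprecated module-level wrappers relate, assuming an InvokeAI runtime config is reachable through get_config(); the asserts only reflect the delegation shown in the diff above:

# Illustrative only: exercises TorchDevice alongside the deprecated wrappers it replaces.
from invokeai.backend.util.devices import TorchDevice, choose_torch_device, torch_dtype

device = TorchDevice.choose_torch_device()      # honours config.device, falls back to cuda/mps/cpu
dtype = TorchDevice.choose_torch_dtype(device)  # float16 on capable GPUs, float32 otherwise
print(TorchDevice.get_torch_device_name(), dtype)

# The legacy helpers now delegate to TorchDevice and emit DeprecationWarnings.
assert choose_torch_device() == device
assert torch_dtype(device) == dtype

# Free cached GPU memory, e.g. after a large generation.
TorchDevice.empty_cache()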
98
invokeai/configs/stable-diffusion/sd_xl_inpaint.yaml
Normal file
@ -0,0 +1,98 @@
|
||||
model:
|
||||
target: sgm.models.diffusion.DiffusionEngine
|
||||
params:
|
||||
scale_factor: 0.13025
|
||||
disable_first_stage_autocast: True
|
||||
|
||||
denoiser_config:
|
||||
target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
|
||||
params:
|
||||
num_idx: 1000
|
||||
|
||||
weighting_config:
|
||||
target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
|
||||
scaling_config:
|
||||
target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
|
||||
discretization_config:
|
||||
target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
|
||||
|
||||
network_config:
|
||||
target: sgm.modules.diffusionmodules.openaimodel.UNetModel
|
||||
params:
|
||||
adm_in_channels: 2816
|
||||
num_classes: sequential
|
||||
use_checkpoint: True
|
||||
in_channels: 9
|
||||
out_channels: 4
|
||||
model_channels: 320
|
||||
attention_resolutions: [4, 2]
|
||||
num_res_blocks: 2
|
||||
channel_mult: [1, 2, 4]
|
||||
num_head_channels: 64
|
||||
use_spatial_transformer: True
|
||||
use_linear_in_transformer: True
|
||||
transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
|
||||
context_dim: 2048
|
||||
spatial_transformer_attn_type: softmax-xformers
|
||||
legacy: False
|
||||
|
||||
conditioner_config:
|
||||
target: sgm.modules.GeneralConditioner
|
||||
params:
|
||||
emb_models:
|
||||
# crossattn cond
|
||||
- is_trainable: False
|
||||
input_key: txt
|
||||
target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
|
||||
params:
|
||||
layer: hidden
|
||||
layer_idx: 11
|
||||
# crossattn and vector cond
|
||||
- is_trainable: False
|
||||
input_key: txt
|
||||
target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
|
||||
params:
|
||||
arch: ViT-bigG-14
|
||||
version: laion2b_s39b_b160k
|
||||
freeze: True
|
||||
layer: penultimate
|
||||
always_return_pooled: True
|
||||
legacy: False
|
||||
# vector cond
|
||||
- is_trainable: False
|
||||
input_key: original_size_as_tuple
|
||||
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
|
||||
params:
|
||||
outdim: 256 # multiplied by two
|
||||
# vector cond
|
||||
- is_trainable: False
|
||||
input_key: crop_coords_top_left
|
||||
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
|
||||
params:
|
||||
outdim: 256 # multiplied by two
|
||||
# vector cond
|
||||
- is_trainable: False
|
||||
input_key: target_size_as_tuple
|
||||
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
|
||||
params:
|
||||
outdim: 256 # multiplied by two
|
||||
|
||||
first_stage_config:
|
||||
target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
|
||||
params:
|
||||
embed_dim: 4
|
||||
monitor: val/rec_loss
|
||||
ddconfig:
|
||||
attn_type: vanilla-xformers
|
||||
double_z: true
|
||||
z_channels: 4
|
||||
resolution: 256
|
||||
in_channels: 3
|
||||
out_ch: 3
|
||||
ch: 128
|
||||
ch_mult: [1, 2, 4, 4]
|
||||
num_res_blocks: 2
|
||||
attn_resolutions: []
|
||||
dropout: 0.0
|
||||
lossconfig:
|
||||
target: torch.nn.Identity
|
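The config above describes an SDXL inpainting UNet: note in_channels: 9, i.e. four noisy-latent channels, four masked-image latent channels, and one mask channel. A small sketch of loading the file and checking that field, assuming PyYAML is available and the file sits at the path introduced by this diff:

# Illustrative check of the new legacy config; the path and the 9-channel
# breakdown follow the YAML above.
import yaml

with open("invokeai/configs/stable-diffusion/sd_xl_inpaint.yaml") as f:
    config = yaml.safe_load(f)

unet_params = config["model"]["params"]["network_config"]["params"]
assert unet_params["in_channels"] == 9  # 4 latent + 4 masked-image latent + 1 mask
print("context_dim:", unet_params["context_dim"])  # 2048 for SDXL's dual text encoders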
@ -10,6 +10,8 @@ module.exports = {
|
||||
'path/no-relative-imports': ['error', { maxDepth: 0 }],
|
||||
// https://github.com/edvardchen/eslint-plugin-i18next/blob/HEAD/docs/rules/no-literal-string.md
|
||||
'i18next/no-literal-string': 'error',
|
||||
// https://eslint.org/docs/latest/rules/no-console
|
||||
'no-console': 'error',
|
||||
},
|
||||
overrides: [
|
||||
/**
|
||||
|
3
invokeai/frontend/web/.gitignore
vendored
@ -43,4 +43,5 @@ stats.html
|
||||
yalc.lock
|
||||
|
||||
# vitest
|
||||
tsconfig.vitest-temp.json
|
||||
tsconfig.vitest-temp.json
|
||||
coverage/
|
@ -11,6 +11,7 @@ import { createStore } from '../src/app/store/store';
|
||||
// @ts-ignore
|
||||
import translationEN from '../public/locales/en.json';
|
||||
import { ReduxInit } from './ReduxInit';
|
||||
import { $store } from 'app/store/nanostores/store';
|
||||
|
||||
i18n.use(initReactI18next).init({
|
||||
lng: 'en',
|
||||
@ -25,6 +26,7 @@ i18n.use(initReactI18next).init({
|
||||
});
|
||||
|
||||
const store = createStore(undefined, false);
|
||||
$store.set(store);
|
||||
$baseUrl.set('http://localhost:9090');
|
||||
|
||||
const preview: Preview = {
|
||||
|
@ -35,6 +35,7 @@
|
||||
"storybook": "storybook dev -p 6006",
|
||||
"build-storybook": "storybook build",
|
||||
"test": "vitest",
|
||||
"test:ui": "vitest --coverage --ui",
|
||||
"test:no-watch": "vitest --no-watch"
|
||||
},
|
||||
"madge": {
|
||||
@ -52,58 +53,61 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@chakra-ui/react-use-size": "^2.1.0",
|
||||
"@dagrejs/dagre": "^1.1.1",
|
||||
"@dagrejs/graphlib": "^2.2.1",
|
||||
"@dagrejs/dagre": "^1.1.2",
|
||||
"@dagrejs/graphlib": "^2.2.2",
|
||||
"@dnd-kit/core": "^6.1.0",
|
||||
"@dnd-kit/sortable": "^8.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@fontsource-variable/inter": "^5.0.17",
|
||||
"@invoke-ai/ui-library": "^0.0.21",
|
||||
"@fontsource-variable/inter": "^5.0.18",
|
||||
"@invoke-ai/ui-library": "^0.0.25",
|
||||
"@nanostores/react": "^0.7.2",
|
||||
"@reduxjs/toolkit": "2.2.2",
|
||||
"@reduxjs/toolkit": "2.2.3",
|
||||
"@roarr/browser-log-writer": "^1.3.0",
|
||||
"chakra-react-select": "^4.7.6",
|
||||
"compare-versions": "^6.1.0",
|
||||
"dateformat": "^5.0.3",
|
||||
"framer-motion": "^11.0.22",
|
||||
"i18next": "^23.10.1",
|
||||
"i18next-http-backend": "^2.5.0",
|
||||
"fracturedjsonjs": "^4.0.1",
|
||||
"framer-motion": "^11.1.8",
|
||||
"i18next": "^23.11.3",
|
||||
"i18next-http-backend": "^2.5.1",
|
||||
"idb-keyval": "^6.2.1",
|
||||
"jsondiffpatch": "^0.6.0",
|
||||
"konva": "^9.3.6",
|
||||
"lodash-es": "^4.17.21",
|
||||
"nanostores": "^0.10.0",
|
||||
"nanostores": "^0.10.3",
|
||||
"new-github-issue-url": "^1.0.0",
|
||||
"overlayscrollbars": "^2.6.1",
|
||||
"overlayscrollbars-react": "^0.5.5",
|
||||
"overlayscrollbars": "^2.7.3",
|
||||
"overlayscrollbars-react": "^0.5.6",
|
||||
"query-string": "^9.0.0",
|
||||
"react": "^18.2.0",
|
||||
"react": "^18.3.1",
|
||||
"react-colorful": "^5.6.1",
|
||||
"react-dom": "^18.2.0",
|
||||
"react-dom": "^18.3.1",
|
||||
"react-dropzone": "^14.2.3",
|
||||
"react-error-boundary": "^4.0.13",
|
||||
"react-hook-form": "^7.51.2",
|
||||
"react-hook-form": "^7.51.4",
|
||||
"react-hotkeys-hook": "4.5.0",
|
||||
"react-i18next": "^14.1.0",
|
||||
"react-icons": "^5.0.1",
|
||||
"react-i18next": "^14.1.1",
|
||||
"react-icons": "^5.2.0",
|
||||
"react-konva": "^18.2.10",
|
||||
"react-redux": "9.1.0",
|
||||
"react-resizable-panels": "^2.0.16",
|
||||
"react-redux": "9.1.2",
|
||||
"react-resizable-panels": "^2.0.19",
|
||||
"react-select": "5.8.0",
|
||||
"react-use": "^17.5.0",
|
||||
"react-virtuoso": "^4.7.5",
|
||||
"reactflow": "^11.10.4",
|
||||
"react-virtuoso": "^4.7.10",
|
||||
"reactflow": "^11.11.3",
|
||||
"redux-dynamic-middlewares": "^2.2.0",
|
||||
"redux-remember": "^5.1.0",
|
||||
"redux-undo": "^1.1.0",
|
||||
"rfdc": "^1.3.1",
|
||||
"roarr": "^7.21.1",
|
||||
"serialize-error": "^11.0.3",
|
||||
"socket.io-client": "^4.7.5",
|
||||
"use-debounce": "^10.0.0",
|
||||
"use-device-pixel-ratio": "^1.1.2",
|
||||
"use-image": "^1.1.1",
|
||||
"uuid": "^9.0.1",
|
||||
"zod": "^3.22.4",
|
||||
"zod-validation-error": "^3.0.3"
|
||||
"zod": "^3.23.6",
|
||||
"zod-validation-error": "^3.2.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@chakra-ui/react": "^2.8.2",
|
||||
@ -114,40 +118,42 @@
|
||||
"devDependencies": {
|
||||
"@invoke-ai/eslint-config-react": "^0.0.14",
|
||||
"@invoke-ai/prettier-config-react": "^0.0.7",
|
||||
"@storybook/addon-essentials": "^8.0.4",
|
||||
"@storybook/addon-interactions": "^8.0.4",
|
||||
"@storybook/addon-links": "^8.0.4",
|
||||
"@storybook/addon-storysource": "^8.0.4",
|
||||
"@storybook/manager-api": "^8.0.4",
|
||||
"@storybook/react": "^8.0.4",
|
||||
"@storybook/react-vite": "^8.0.4",
|
||||
"@storybook/theming": "^8.0.4",
|
||||
"@storybook/addon-essentials": "^8.0.10",
|
||||
"@storybook/addon-interactions": "^8.0.10",
|
||||
"@storybook/addon-links": "^8.0.10",
|
||||
"@storybook/addon-storysource": "^8.0.10",
|
||||
"@storybook/manager-api": "^8.0.10",
|
||||
"@storybook/react": "^8.0.10",
|
||||
"@storybook/react-vite": "^8.0.10",
|
||||
"@storybook/theming": "^8.0.10",
|
||||
"@types/dateformat": "^5.0.2",
|
||||
"@types/lodash-es": "^4.17.12",
|
||||
"@types/node": "^20.11.30",
|
||||
"@types/react": "^18.2.73",
|
||||
"@types/react-dom": "^18.2.22",
|
||||
"@types/node": "^20.12.10",
|
||||
"@types/react": "^18.3.1",
|
||||
"@types/react-dom": "^18.3.0",
|
||||
"@types/uuid": "^9.0.8",
|
||||
"@vitejs/plugin-react-swc": "^3.6.0",
|
||||
"@vitest/coverage-v8": "^1.5.0",
|
||||
"@vitest/ui": "^1.5.0",
|
||||
"concurrently": "^8.2.2",
|
||||
"dpdm": "^3.14.0",
|
||||
"eslint": "^8.57.0",
|
||||
"eslint-plugin-i18next": "^6.0.3",
|
||||
"eslint-plugin-path": "^1.3.0",
|
||||
"knip": "^5.6.1",
|
||||
"knip": "^5.12.3",
|
||||
"openapi-types": "^12.1.3",
|
||||
"openapi-typescript": "^6.7.5",
|
||||
"prettier": "^3.2.5",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"storybook": "^8.0.4",
|
||||
"storybook": "^8.0.10",
|
||||
"ts-toolbelt": "^9.6.0",
|
||||
"tsafe": "^1.6.6",
|
||||
"typescript": "^5.4.3",
|
||||
"vite": "^5.2.6",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.0",
|
||||
"vite-plugin-dts": "^3.8.0",
|
||||
"typescript": "^5.4.5",
|
||||
"vite": "^5.2.11",
|
||||
"vite-plugin-css-injected-by-js": "^3.5.1",
|
||||
"vite-plugin-dts": "^3.9.1",
|
||||
"vite-plugin-eslint": "^1.8.1",
|
||||
"vite-tsconfig-paths": "^4.3.2",
|
||||
"vitest": "^1.4.0"
|
||||
"vitest": "^1.6.0"
|
||||
}
|
||||
}
|
||||
|
5463
invokeai/frontend/web/pnpm-lock.yaml
generated
File diff suppressed because it is too large
BIN
invokeai/frontend/web/public/assets/images/transparent_bg.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.7 KiB |
@ -76,7 +76,9 @@
|
||||
"aboutHeading": "Nutzen Sie Ihre kreative Energie",
|
||||
"toResolve": "Lösen",
|
||||
"add": "Hinzufügen",
|
||||
"loglevel": "Protokoll Stufe"
|
||||
"loglevel": "Protokoll Stufe",
|
||||
"selected": "Ausgewählt",
|
||||
"beta": "Beta"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Bildgröße",
|
||||
@ -85,7 +87,8 @@
|
||||
"loadMore": "Mehr laden",
|
||||
"noImagesInGallery": "Keine Bilder in der Galerie",
|
||||
"loading": "Lade",
|
||||
"deleteImage": "Lösche Bild",
|
||||
"deleteImage_one": "Lösche Bild",
|
||||
"deleteImage_other": "Lösche {{count}} Bilder",
|
||||
"copy": "Kopieren",
|
||||
"download": "Runterladen",
|
||||
"setCurrentImage": "Setze aktuelle Bild",
|
||||
@ -396,7 +399,14 @@
|
||||
"cancel": "Stornieren",
|
||||
"defaultSettingsSaved": "Standardeinstellungen gespeichert",
|
||||
"addModels": "Model hinzufügen",
|
||||
"deleteModelImage": "Lösche Model Bild"
|
||||
"deleteModelImage": "Lösche Model Bild",
|
||||
"hfTokenInvalidErrorMessage": "Falscher oder fehlender HuggingFace Schlüssel.",
|
||||
"huggingFaceRepoID": "HuggingFace Repo ID",
|
||||
"hfToken": "HuggingFace Schlüssel",
|
||||
"hfTokenInvalid": "Falscher oder fehlender HF Schlüssel",
|
||||
"huggingFacePlaceholder": "besitzer/model-name",
|
||||
"hfTokenSaved": "HF Schlüssel gespeichert",
|
||||
"hfTokenUnableToVerify": "Konnte den HF Schlüssel nicht validieren"
|
||||
},
|
||||
"parameters": {
|
||||
"images": "Bilder",
|
||||
@ -685,7 +695,11 @@
|
||||
"hands": "Hände",
|
||||
"dwOpenpose": "DW Openpose",
|
||||
"dwOpenposeDescription": "Posenschätzung mit DW Openpose",
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus"
|
||||
"selectCLIPVisionModel": "Wähle ein CLIP Vision Model aus",
|
||||
"ipAdapterMethod": "Methode",
|
||||
"composition": "Nur Komposition",
|
||||
"full": "Voll",
|
||||
"style": "Nur Style"
|
||||
},
|
||||
"queue": {
|
||||
"status": "Status",
|
||||
@ -716,7 +730,6 @@
|
||||
"resume": "Wieder aufnehmen",
|
||||
"item": "Auftrag",
|
||||
"notReady": "Warteschlange noch nicht bereit",
|
||||
"queueCountPrediction": "{{promptsCount}} Prompts × {{iterations}} Iterationen -> {{count}} Generationen",
|
||||
"clearQueueAlertDialog": "\"Die Warteschlange leeren\" stoppt den aktuellen Prozess und leert die Warteschlange komplett.",
|
||||
"completedIn": "Fertig in",
|
||||
"cancelBatchSucceeded": "Stapel abgebrochen",
|
||||
|
@ -69,6 +69,7 @@
|
||||
"auto": "Auto",
|
||||
"back": "Back",
|
||||
"batch": "Batch Manager",
|
||||
"beta": "Beta",
|
||||
"cancel": "Cancel",
|
||||
"copy": "Copy",
|
||||
"copyError": "$t(gallery.copy) Error",
|
||||
@ -83,13 +84,17 @@
|
||||
"direction": "Direction",
|
||||
"ipAdapter": "IP Adapter",
|
||||
"t2iAdapter": "T2I Adapter",
|
||||
"positivePrompt": "Positive Prompt",
|
||||
"negativePrompt": "Negative Prompt",
|
||||
"discordLabel": "Discord",
|
||||
"dontAskMeAgain": "Don't ask me again",
|
||||
"editor": "Editor",
|
||||
"error": "Error",
|
||||
"file": "File",
|
||||
"folder": "Folder",
|
||||
"format": "format",
|
||||
"githubLabel": "Github",
|
||||
"goTo": "Go to",
|
||||
"hotkeysLabel": "Hotkeys",
|
||||
"imageFailedToLoad": "Unable to Load Image",
|
||||
"img2img": "Image To Image",
|
||||
@ -135,7 +140,13 @@
|
||||
"red": "Red",
|
||||
"green": "Green",
|
||||
"blue": "Blue",
|
||||
"alpha": "Alpha"
|
||||
"alpha": "Alpha",
|
||||
"selected": "Selected",
|
||||
"tab": "Tab",
|
||||
"viewing": "Viewing",
|
||||
"viewingDesc": "Review images in a large gallery view",
|
||||
"editing": "Editing",
|
||||
"editingDesc": "Edit on the Control Layers canvas"
|
||||
},
|
||||
"controlnet": {
|
||||
"controlAdapter_one": "Control Adapter",
|
||||
@ -151,6 +162,7 @@
|
||||
"balanced": "Balanced",
|
||||
"base": "Base",
|
||||
"beginEndStepPercent": "Begin / End Step Percentage",
|
||||
"beginEndStepPercentShort": "Begin/End %",
|
||||
"bgth": "bg_th",
|
||||
"canny": "Canny",
|
||||
"cannyDescription": "Canny edge detection",
|
||||
@ -213,12 +225,17 @@
|
||||
"resize": "Resize",
|
||||
"resizeSimple": "Resize (Simple)",
|
||||
"resizeMode": "Resize Mode",
|
||||
"ipAdapterMethod": "Method",
|
||||
"full": "Full",
|
||||
"style": "Style Only",
|
||||
"composition": "Composition Only",
|
||||
"safe": "Safe",
|
||||
"saveControlImage": "Save Control Image",
|
||||
"scribble": "scribble",
|
||||
"scribble": "Scribble",
|
||||
"selectModel": "Select a model",
|
||||
"selectCLIPVisionModel": "Select a CLIP Vision model",
|
||||
"setControlImageDimensions": "Set Control Image Dimensions To W/H",
|
||||
"setControlImageDimensions": "Copy size to W/H (optimize for model)",
|
||||
"setControlImageDimensionsForce": "Copy size to W/H (ignore model)",
|
||||
"showAdvanced": "Show Advanced",
|
||||
"small": "Small",
|
||||
"toggleControlNet": "Toggle this ControlNet",
|
||||
@ -244,7 +261,6 @@
|
||||
"queue": "Queue",
|
||||
"queueFront": "Add to Front of Queue",
|
||||
"queueBack": "Add to Queue",
|
||||
"queueCountPrediction": "{{promptsCount}} prompts \u00d7 {{iterations}} iterations -> {{count}} generations",
|
||||
"queueEmpty": "Queue Empty",
|
||||
"enqueueing": "Queueing Batch",
|
||||
"resume": "Resume",
|
||||
@ -297,7 +313,13 @@
|
||||
"batchFailedToQueue": "Failed to Queue Batch",
|
||||
"graphQueued": "Graph queued",
|
||||
"graphFailedToQueue": "Failed to queue graph",
|
||||
"openQueue": "Open Queue"
|
||||
"openQueue": "Open Queue",
|
||||
"prompts_one": "Prompt",
|
||||
"prompts_other": "Prompts",
|
||||
"iterations_one": "Iteration",
|
||||
"iterations_other": "Iterations",
|
||||
"generations_one": "Generation",
|
||||
"generations_other": "Generations"
|
||||
},
|
||||
"invocationCache": {
|
||||
"invocationCache": "Invocation Cache",
|
||||
@ -573,6 +595,10 @@
|
||||
"upscale": {
|
||||
"desc": "Upscale the current image",
|
||||
"title": "Upscale"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"desc": "Switches between the Image Viewer and workspace for the current tab.",
|
||||
"title": "Toggle Image Viewer"
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
@ -748,10 +774,15 @@
|
||||
"cannotConnectOutputToOutput": "Cannot connect output to output",
|
||||
"cannotConnectToSelf": "Cannot connect to self",
|
||||
"cannotDuplicateConnection": "Cannot create duplicate connections",
|
||||
"cannotMixAndMatchCollectionItemTypes": "Cannot mix and match collection item types",
|
||||
"missingNode": "Missing invocation node",
|
||||
"missingInvocationTemplate": "Missing invocation template",
|
||||
"missingFieldTemplate": "Missing field template",
|
||||
"nodePack": "Node pack",
|
||||
"collection": "Collection",
|
||||
"collectionFieldType": "{{name}} Collection",
|
||||
"collectionOrScalarFieldType": "{{name}} Collection|Scalar",
|
||||
"singleFieldType": "{{name}} (Single)",
|
||||
"collectionFieldType": "{{name}} (Collection)",
|
||||
"collectionOrScalarFieldType": "{{name}} (Single or Collection)",
|
||||
"colorCodeEdges": "Color-Code Edges",
|
||||
"colorCodeEdgesHelp": "Color-code edges according to their connected fields",
|
||||
"connectionWouldCreateCycle": "Connection would create a cycle",
|
||||
@ -770,6 +801,8 @@
|
||||
"float": "Float",
|
||||
"fullyContainNodes": "Fully Contain Nodes to Select",
|
||||
"fullyContainNodesHelp": "Nodes must be fully inside the selection box to be selected",
|
||||
"showEdgeLabels": "Show Edge Labels",
|
||||
"showEdgeLabelsHelp": "Show labels on edges, indicating the connected nodes",
|
||||
"hideLegendNodes": "Hide Field Type Legend",
|
||||
"hideMinimapnodes": "Hide MiniMap",
|
||||
"inputMayOnlyHaveOneConnection": "Input may only have one connection",
|
||||
@ -851,6 +884,7 @@
|
||||
"versionUnknown": " Version Unknown",
|
||||
"workflow": "Workflow",
|
||||
"graph": "Graph",
|
||||
"noGraph": "No Graph",
|
||||
"workflowAuthor": "Author",
|
||||
"workflowContact": "Contact",
|
||||
"workflowDescription": "Short Description",
|
||||
@ -886,6 +920,7 @@
|
||||
"denoisingStrength": "Denoising Strength",
|
||||
"downloadImage": "Download Image",
|
||||
"general": "General",
|
||||
"globalSettings": "Global Settings",
|
||||
"height": "Height",
|
||||
"imageFit": "Fit Initial Image To Output Size",
|
||||
"images": "Images",
|
||||
@ -903,23 +938,37 @@
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} missing input",
|
||||
"missingNodeTemplate": "Missing node template",
|
||||
"noControlImageForControlAdapter": "Control Adapter #{{number}} has no control image",
|
||||
"imageNotProcessedForControlAdapter": "Control Adapter #{{number}}'s image is not processed",
|
||||
"noInitialImageSelected": "No initial image selected",
|
||||
"noModelForControlAdapter": "Control Adapter #{{number}} has no model selected.",
|
||||
"incompatibleBaseModelForControlAdapter": "Control Adapter #{{number}} model is incompatible with main model.",
|
||||
"noModelSelected": "No model selected",
|
||||
"noPrompts": "No prompts generated",
|
||||
"noNodesInGraph": "No nodes in graph",
|
||||
"systemDisconnected": "System disconnected"
|
||||
"systemDisconnected": "System disconnected",
|
||||
"layer": {
|
||||
"initialImageNoImageSelected": "no initial image selected",
|
||||
"controlAdapterNoModelSelected": "no Control Adapter model selected",
|
||||
"controlAdapterIncompatibleBaseModel": "incompatible Control Adapter base model",
|
||||
"controlAdapterNoImageSelected": "no Control Adapter image selected",
|
||||
"controlAdapterImageNotProcessed": "Control Adapter image not processed",
|
||||
"t2iAdapterIncompatibleDimensions": "T2I Adapter requires image dimension to be multiples of {{multiple}}",
|
||||
"ipAdapterNoModelSelected": "no IP adapter selected",
|
||||
"ipAdapterIncompatibleBaseModel": "incompatible IP Adapter base model",
|
||||
"ipAdapterNoImageSelected": "no IP Adapter image selected",
|
||||
"rgNoPromptsOrIPAdapters": "no text prompts or IP Adapters",
|
||||
"rgNoRegion": "no region selected"
|
||||
}
|
||||
},
|
||||
"maskBlur": "Mask Blur",
|
||||
"negativePromptPlaceholder": "Negative Prompt",
|
||||
"globalNegativePromptPlaceholder": "Global Negative Prompt",
|
||||
"noiseThreshold": "Noise Threshold",
|
||||
"patchmatchDownScaleSize": "Downscale",
|
||||
"perlinNoise": "Perlin Noise",
|
||||
"positivePromptPlaceholder": "Positive Prompt",
|
||||
"globalPositivePromptPlaceholder": "Global Positive Prompt",
|
||||
"iterations": "Iterations",
|
||||
"iterationsWithCount_one": "{{count}} Iteration",
|
||||
"iterationsWithCount_other": "{{count}} Iterations",
|
||||
"scale": "Scale",
|
||||
"scaleBeforeProcessing": "Scale Before Processing",
|
||||
"scaledHeight": "Scaled H",
|
||||
@ -1176,6 +1225,10 @@
|
||||
"heading": "Resize Mode",
|
||||
"paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Method",
|
||||
"paragraphs": ["Method by which to apply the current IP Adapter."]
|
||||
},
|
||||
"controlNetWeight": {
|
||||
"heading": "Weight",
|
||||
"paragraphs": [
|
||||
@ -1494,5 +1547,55 @@
|
||||
},
|
||||
"app": {
|
||||
"storeNotInitialized": "Store is not initialized"
|
||||
},
|
||||
"controlLayers": {
|
||||
"deleteAll": "Delete All",
|
||||
"addLayer": "Add Layer",
|
||||
"moveToFront": "Move to Front",
|
||||
"moveToBack": "Move to Back",
|
||||
"moveForward": "Move Forward",
|
||||
"moveBackward": "Move Backward",
|
||||
"brushSize": "Brush Size",
|
||||
"controlLayers": "Control Layers",
|
||||
"globalMaskOpacity": "Global Mask Opacity",
|
||||
"autoNegative": "Auto Negative",
|
||||
"toggleVisibility": "Toggle Layer Visibility",
|
||||
"deletePrompt": "Delete Prompt",
|
||||
"resetRegion": "Reset Region",
|
||||
"debugLayers": "Debug Layers",
|
||||
"rectangle": "Rectangle",
|
||||
"maskPreviewColor": "Mask Preview Color",
|
||||
"addPositivePrompt": "Add $t(common.positivePrompt)",
|
||||
"addNegativePrompt": "Add $t(common.negativePrompt)",
|
||||
"addIPAdapter": "Add $t(common.ipAdapter)",
|
||||
"regionalGuidance": "Regional Guidance",
|
||||
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
|
||||
"opacity": "Opacity",
|
||||
"globalControlAdapter": "Global $t(controlnet.controlAdapter_one)",
|
||||
"globalControlAdapterLayer": "Global $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
|
||||
"globalIPAdapter": "Global $t(common.ipAdapter)",
|
||||
"globalIPAdapterLayer": "Global $t(common.ipAdapter) $t(unifiedCanvas.layer)",
|
||||
"globalInitialImage": "Global Initial Image",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) $t(unifiedCanvas.layer)",
|
||||
"opacityFilter": "Opacity Filter",
|
||||
"clearProcessor": "Clear Processor",
|
||||
"resetProcessor": "Reset Processor to Defaults",
|
||||
"noLayersAdded": "No Layers Added",
|
||||
"layers_one": "Layer",
|
||||
"layers_other": "Layers"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generation",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Canvas",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflows": "Workflows",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Models",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Queue",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -25,7 +25,24 @@
|
||||
"areYouSure": "¿Estas seguro?",
|
||||
"batch": "Administrador de lotes",
|
||||
"modelManager": "Administrador de modelos",
|
||||
"communityLabel": "Comunidad"
|
||||
"communityLabel": "Comunidad",
|
||||
"direction": "Dirección",
|
||||
"ai": "Ia",
|
||||
"add": "Añadir",
|
||||
"auto": "Automático",
|
||||
"copyError": "Error $t(gallery.copy)",
|
||||
"details": "Detalles",
|
||||
"or": "o",
|
||||
"checkpoint": "Punto de control",
|
||||
"controlNet": "ControlNet",
|
||||
"aboutHeading": "Sea dueño de su poder creativo",
|
||||
"advanced": "Avanzado",
|
||||
"data": "Fecha",
|
||||
"delete": "Borrar",
|
||||
"copy": "Copiar",
|
||||
"beta": "Beta",
|
||||
"on": "En",
|
||||
"aboutDesc": "¿Utilizas Invoke para trabajar? Mira aquí:"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Tamaño de la imagen",
|
||||
@ -33,7 +50,9 @@
|
||||
"autoSwitchNewImages": "Auto seleccionar Imágenes nuevas",
|
||||
"loadMore": "Cargar más",
|
||||
"noImagesInGallery": "No hay imágenes para mostrar",
|
||||
"deleteImage": "Eliminar Imagen",
|
||||
"deleteImage_one": "Eliminar Imagen",
|
||||
"deleteImage_many": "",
|
||||
"deleteImage_other": "",
|
||||
"deleteImageBin": "Las imágenes eliminadas se enviarán a la papelera de tu sistema operativo.",
|
||||
"deleteImagePermanent": "Las imágenes eliminadas no se pueden restaurar.",
|
||||
"assets": "Activos",
|
||||
@ -441,7 +460,13 @@
|
||||
"previousImage": "Imagen anterior",
|
||||
"nextImage": "Siguiente imagen",
|
||||
"showOptionsPanel": "Mostrar el panel lateral",
|
||||
"menu": "Menú"
|
||||
"menu": "Menú",
|
||||
"showGalleryPanel": "Mostrar panel de galería",
|
||||
"loadMore": "Cargar más",
|
||||
"about": "Acerca de",
|
||||
"createIssue": "Crear un problema",
|
||||
"resetUI": "Interfaz de usuario $t(accessibility.reset)",
|
||||
"mode": "Modo"
|
||||
},
|
||||
"nodes": {
|
||||
"zoomInNodes": "Acercar",
|
||||
@ -454,5 +479,68 @@
|
||||
"reloadNodeTemplates": "Recargar las plantillas de nodos",
|
||||
"loadWorkflow": "Cargar el flujo de trabajo",
|
||||
"downloadWorkflow": "Descargar el flujo de trabajo en un archivo JSON"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Agregar panel automáticamente",
|
||||
"changeBoard": "Cambiar el panel",
|
||||
"clearSearch": "Borrar la búsqueda",
|
||||
"deleteBoard": "Borrar el panel",
|
||||
"selectBoard": "Seleccionar un panel",
|
||||
"uncategorized": "Sin categoría",
|
||||
"cancel": "Cancelar",
|
||||
"addBoard": "Agregar un panel",
|
||||
"movingImagesToBoard_one": "Moviendo {{count}} imagen al panel:",
|
||||
"movingImagesToBoard_many": "Moviendo {{count}} imágenes al panel:",
|
||||
"movingImagesToBoard_other": "Moviendo {{count}} imágenes al panel:",
|
||||
"bottomMessage": "Al eliminar este panel y las imágenes que contiene, se restablecerán las funciones que los estén utilizando actualmente.",
|
||||
"deleteBoardAndImages": "Borrar el panel y las imágenes",
|
||||
"loading": "Cargando...",
|
||||
"deletedBoardsCannotbeRestored": "Los paneles eliminados no se pueden restaurar",
|
||||
"move": "Mover",
|
||||
"menuItemAutoAdd": "Agregar automáticamente a este panel",
|
||||
"searchBoard": "Buscando paneles…",
|
||||
"topMessage": "Este panel contiene imágenes utilizadas en las siguientes funciones:",
|
||||
"downloadBoard": "Descargar panel",
|
||||
"deleteBoardOnly": "Borrar solo el panel",
|
||||
"myBoard": "Mi panel",
|
||||
"noMatching": "No hay paneles que coincidan"
|
||||
},
|
||||
"accordions": {
|
||||
"compositing": {
|
||||
"title": "Composición",
|
||||
"infillTab": "Relleno"
|
||||
},
|
||||
"generation": {
|
||||
"title": "Generación"
|
||||
},
|
||||
"image": {
|
||||
"title": "Imagen"
|
||||
},
|
||||
"control": {
|
||||
"title": "Control"
|
||||
},
|
||||
"advanced": {
|
||||
"options": "$t(accordions.advanced.title) opciones",
|
||||
"title": "Avanzado"
|
||||
}
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Lienzo",
|
||||
"generation": "Generación",
|
||||
"queue": "Cola",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"workflows": "Flujos de trabajo",
|
||||
"models": "Modelos",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)"
|
||||
}
|
||||
},
|
||||
"controlLayers": {
|
||||
"layers_one": "Capa",
|
||||
"layers_many": "Capas",
|
||||
"layers_other": "Capas"
|
||||
}
|
||||
}
|
||||
|
@ -5,7 +5,7 @@
|
||||
"reportBugLabel": "Segnala un errore",
|
||||
"settingsLabel": "Impostazioni",
|
||||
"img2img": "Immagine a Immagine",
|
||||
"unifiedCanvas": "Tela unificata",
|
||||
"unifiedCanvas": "Tela",
|
||||
"nodes": "Flussi di lavoro",
|
||||
"upload": "Caricamento",
|
||||
"load": "Carica",
|
||||
@ -74,7 +74,18 @@
|
||||
"file": "File",
|
||||
"toResolve": "Da risolvere",
|
||||
"add": "Aggiungi",
|
||||
"loglevel": "Livello di log"
|
||||
"loglevel": "Livello di log",
|
||||
"beta": "Beta",
|
||||
"positivePrompt": "Prompt positivo",
|
||||
"negativePrompt": "Prompt negativo",
|
||||
"selected": "Selezionato",
|
||||
"goTo": "Vai a",
|
||||
"editor": "Editor",
|
||||
"tab": "Scheda",
|
||||
"viewing": "Visualizza",
|
||||
"viewingDesc": "Rivedi le immagini in un'ampia vista della galleria",
|
||||
"editing": "Modifica",
|
||||
"editingDesc": "Modifica nell'area Livelli di controllo"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Dimensione dell'immagine",
|
||||
@ -82,7 +93,9 @@
|
||||
"autoSwitchNewImages": "Passaggio automatico a nuove immagini",
|
||||
"loadMore": "Carica altro",
|
||||
"noImagesInGallery": "Nessuna immagine da visualizzare",
|
||||
"deleteImage": "Elimina l'immagine",
|
||||
"deleteImage_one": "Elimina l'immagine",
|
||||
"deleteImage_many": "Elimina {{count}} immagini",
|
||||
"deleteImage_other": "Elimina {{count}} immagini",
|
||||
"deleteImagePermanent": "Le immagini eliminate non possono essere ripristinate.",
|
||||
"deleteImageBin": "Le immagini eliminate verranno spostate nel cestino del tuo sistema operativo.",
|
||||
"assets": "Risorse",
|
||||
@ -178,8 +191,8 @@
|
||||
"desc": "Mostra le informazioni sui metadati dell'immagine corrente"
|
||||
},
|
||||
"sendToImageToImage": {
|
||||
"title": "Invia a Immagine a Immagine",
|
||||
"desc": "Invia l'immagine corrente a da Immagine a Immagine"
|
||||
"title": "Invia a Generazione da immagine",
|
||||
"desc": "Invia l'immagine corrente a Generazione da immagine"
|
||||
},
|
||||
"deleteImage": {
|
||||
"title": "Elimina immagine",
|
||||
@ -332,6 +345,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Utilizza tutti i parametri tranne il seme dell'immagine corrente",
|
||||
"title": "Remixa l'immagine"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Attiva/disattiva il visualizzatore di immagini",
|
||||
"desc": "Passa dal Visualizzatore immagini all'area di lavoro per la scheda corrente."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@ -469,8 +486,8 @@
|
||||
"scaledHeight": "Altezza ridimensionata",
|
||||
"infillMethod": "Metodo di riempimento",
|
||||
"tileSize": "Dimensione piastrella",
|
||||
"sendToImg2Img": "Invia a Immagine a Immagine",
|
||||
"sendToUnifiedCanvas": "Invia a Tela Unificata",
|
||||
"sendToImg2Img": "Invia a Generazione da immagine",
|
||||
"sendToUnifiedCanvas": "Invia alla Tela",
|
||||
"downloadImage": "Scarica l'immagine",
|
||||
"usePrompt": "Usa Prompt",
|
||||
"useSeed": "Usa Seme",
|
||||
@ -506,13 +523,11 @@
|
||||
"incompatibleBaseModelForControlAdapter": "Il modello dell'adattatore di controllo #{{number}} non è compatibile con il modello principale.",
|
||||
"missingNodeTemplate": "Modello di nodo mancante",
|
||||
"missingInputForField": "{{nodeLabel}} -> {{fieldLabel}} ingresso mancante",
|
||||
"missingFieldTemplate": "Modello di campo mancante"
|
||||
"missingFieldTemplate": "Modello di campo mancante",
|
||||
"imageNotProcessedForControlAdapter": "L'immagine dell'adattatore di controllo #{{number}} non è stata elaborata"
|
||||
},
|
||||
"useCpuNoise": "Usa la CPU per generare rumore",
|
||||
"iterations": "Iterazioni",
|
||||
"iterationsWithCount_one": "{{count}} Iterazione",
|
||||
"iterationsWithCount_many": "{{count}} Iterazioni",
|
||||
"iterationsWithCount_other": "{{count}} Iterazioni",
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "L'immagine è troppo grande per l'ampliamento con il modello x4, utilizza il modello x2",
|
||||
"tooLarge": "L'immagine è troppo grande per l'ampliamento, seleziona un'immagine più piccola"
|
||||
@ -532,7 +547,10 @@
|
||||
"infillMosaicMinColor": "Colore minimo",
|
||||
"infillMosaicMaxColor": "Colore massimo",
|
||||
"infillMosaicTileHeight": "Altezza piastrella",
|
||||
"infillColorValue": "Colore di riempimento"
|
||||
"infillColorValue": "Colore di riempimento",
|
||||
"globalSettings": "Impostazioni globali",
|
||||
"globalPositivePromptPlaceholder": "Prompt positivo globale",
|
||||
"globalNegativePromptPlaceholder": "Prompt negativo globale"
|
||||
},
|
||||
"settings": {
|
||||
"models": "Modelli",
|
||||
@ -557,7 +575,7 @@
|
||||
"intermediatesCleared_one": "Cancellata {{count}} immagine intermedia",
|
||||
"intermediatesCleared_many": "Cancellate {{count}} immagini intermedie",
|
||||
"intermediatesCleared_other": "Cancellate {{count}} immagini intermedie",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato di Tela Unificata e ControlNet.",
|
||||
"clearIntermediatesDesc1": "La cancellazione delle immagini intermedie ripristinerà lo stato della Tela e degli Adattatori di Controllo.",
|
||||
"intermediatesClearedFailed": "Problema con la cancellazione delle immagini intermedie",
|
||||
"clearIntermediatesWithCount_one": "Cancella {{count}} immagine intermedia",
|
||||
"clearIntermediatesWithCount_many": "Cancella {{count}} immagini intermedie",
|
||||
@ -573,8 +591,8 @@
|
||||
"imageCopied": "Immagine copiata",
|
||||
"imageNotLoadedDesc": "Impossibile trovare l'immagine",
|
||||
"canvasMerged": "Tela unita",
|
||||
"sentToImageToImage": "Inviato a Immagine a Immagine",
|
||||
"sentToUnifiedCanvas": "Inviato a Tela Unificata",
|
||||
"sentToImageToImage": "Inviato a Generazione da immagine",
|
||||
"sentToUnifiedCanvas": "Inviato alla Tela",
|
||||
"parametersNotSet": "Parametri non impostati",
|
||||
"metadataLoadFailed": "Impossibile caricare i metadati",
|
||||
"serverError": "Errore del Server",
|
||||
@ -793,7 +811,7 @@
|
||||
"float": "In virgola mobile",
|
||||
"currentImageDescription": "Visualizza l'immagine corrente nell'editor dei nodi",
|
||||
"fieldTypesMustMatch": "I tipi di campo devono corrispondere",
|
||||
"edge": "Bordo",
|
||||
"edge": "Collegamento",
|
||||
"currentImage": "Immagine corrente",
|
||||
"integer": "Numero Intero",
|
||||
"inputMayOnlyHaveOneConnection": "L'ingresso può avere solo una connessione",
|
||||
@ -843,7 +861,9 @@
|
||||
"resetToDefaultValue": "Ripristina il valore predefinito",
|
||||
"noFieldsViewMode": "Questo flusso di lavoro non ha campi selezionati da visualizzare. Visualizza il flusso di lavoro completo per configurare i valori.",
|
||||
"edit": "Modifica",
|
||||
"graph": "Grafico"
|
||||
"graph": "Grafico",
|
||||
"showEdgeLabelsHelp": "Mostra etichette sui collegamenti, che indicano i nodi collegati",
|
||||
"showEdgeLabels": "Mostra le etichette del collegamento"
|
||||
},
|
||||
"boards": {
|
||||
"autoAddBoard": "Aggiungi automaticamente bacheca",
|
||||
@ -920,7 +940,7 @@
|
||||
"colorMapTileSize": "Dimensione piastrella",
|
||||
"mediapipeFaceDescription": "Rilevamento dei volti tramite Mediapipe",
|
||||
"hedDescription": "Rilevamento dei bordi nidificati olisticamente",
|
||||
"setControlImageDimensions": "Imposta le dimensioni dell'immagine di controllo su L/A",
|
||||
"setControlImageDimensions": "Copia le dimensioni in L/A (ottimizza per il modello)",
|
||||
"maxFaces": "Numero massimo di volti",
|
||||
"addT2IAdapter": "Aggiungi $t(common.t2iAdapter)",
|
||||
"addControlNet": "Aggiungi $t(common.controlNet)",
|
||||
@ -949,12 +969,17 @@
|
||||
"mediapipeFace": "Mediapipe Volto",
|
||||
"ip_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.ipAdapter))",
|
||||
"t2i_adapter": "$t(controlnet.controlAdapter_one) #{{number}} ($t(common.t2iAdapter))",
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision"
|
||||
"selectCLIPVisionModel": "Seleziona un modello CLIP Vision",
|
||||
"ipAdapterMethod": "Metodo",
|
||||
"full": "Completo",
|
||||
"composition": "Solo la composizione",
|
||||
"style": "Solo lo stile",
|
||||
"beginEndStepPercentShort": "Inizio/Fine %",
|
||||
"setControlImageDimensionsForce": "Copia le dimensioni in L/A (ignora il modello)"
|
||||
},
|
||||
"queue": {
|
||||
"queueFront": "Aggiungi all'inizio della coda",
|
||||
"queueBack": "Aggiungi alla coda",
|
||||
"queueCountPrediction": "{{promptsCount}} prompt × {{iterations}} iterazioni -> {{count}} generazioni",
|
||||
"queue": "Coda",
|
||||
"status": "Stato",
|
||||
"pruneSucceeded": "Rimossi {{item_count}} elementi completati dalla coda",
|
||||
@ -991,7 +1016,7 @@
|
||||
"cancelBatchSucceeded": "Lotto annullato",
|
||||
"clearTooltip": "Annulla e cancella tutti gli elementi",
|
||||
"current": "Attuale",
|
||||
"pauseTooltip": "Sospende l'elaborazione",
|
||||
"pauseTooltip": "Sospendi l'elaborazione",
|
||||
"failed": "Falliti",
|
||||
"cancelItem": "Annulla l'elemento",
|
||||
"next": "Prossimo",
|
||||
@ -1392,6 +1417,12 @@
|
||||
"paragraphs": [
|
||||
"La dimensione del bordo del passaggio di coerenza."
|
||||
]
|
||||
},
|
||||
"ipAdapterMethod": {
|
||||
"heading": "Metodo",
|
||||
"paragraphs": [
|
||||
"Metodo con cui applicare l'adattatore IP corrente."
|
||||
]
|
||||
}
|
||||
},
|
||||
"sdxl": {
|
||||
@ -1520,5 +1551,56 @@
|
||||
"compatibleEmbeddings": "Incorporamenti compatibili",
|
||||
"addPromptTrigger": "Aggiungi Trigger nel prompt",
|
||||
"noMatchingTriggers": "Nessun Trigger corrispondente"
|
||||
},
|
||||
"controlLayers": {
|
||||
"opacityFilter": "Filtro opacità",
|
||||
"deleteAll": "Cancella tutto",
|
||||
"addLayer": "Aggiungi Livello",
|
||||
"moveToFront": "Sposta in primo piano",
|
||||
"moveToBack": "Sposta in fondo",
|
||||
"moveForward": "Sposta avanti",
|
||||
"moveBackward": "Sposta indietro",
|
||||
"brushSize": "Dimensioni del pennello",
|
||||
"globalMaskOpacity": "Opacità globale della maschera",
|
||||
"autoNegative": "Auto Negativo",
|
||||
"toggleVisibility": "Attiva/disattiva la visibilità dei livelli",
|
||||
"deletePrompt": "Cancella il prompt",
|
||||
"debugLayers": "Debug dei Livelli",
|
||||
"rectangle": "Rettangolo",
|
||||
"maskPreviewColor": "Colore anteprima maschera",
|
||||
"addPositivePrompt": "Aggiungi $t(common.positivePrompt)",
|
||||
"addNegativePrompt": "Aggiungi $t(common.negativePrompt)",
|
||||
"addIPAdapter": "Aggiungi $t(common.ipAdapter)",
|
||||
"regionalGuidance": "Guida regionale",
|
||||
"regionalGuidanceLayer": "$t(unifiedCanvas.layer) $t(controlLayers.regionalGuidance)",
|
||||
"opacity": "Opacità",
|
||||
"globalControlAdapter": "$t(controlnet.controlAdapter_one) Globale",
|
||||
"globalControlAdapterLayer": "$t(controlnet.controlAdapter_one) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalIPAdapter": "$t(common.ipAdapter) Globale",
|
||||
"globalIPAdapterLayer": "$t(common.ipAdapter) - $t(unifiedCanvas.layer) Globale",
|
||||
"globalInitialImage": "Immagine iniziale",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) - $t(unifiedCanvas.layer) Globale",
|
||||
"clearProcessor": "Cancella processore",
|
||||
"resetProcessor": "Ripristina il processore alle impostazioni predefinite",
|
||||
"noLayersAdded": "Nessun livello aggiunto",
|
||||
"resetRegion": "Reimposta la regione",
|
||||
"controlLayers": "Livelli di controllo",
|
||||
"layers_one": "Livello",
|
||||
"layers_many": "Livelli",
|
||||
"layers_other": "Livelli"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Generazione",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"canvas": "Tela",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"workflows": "Flussi di lavoro",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Modelli",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Coda",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -90,7 +90,7 @@
|
||||
"problemDeletingImages": "画像の削除中に問題が発生",
|
||||
"drop": "ドロップ",
|
||||
"dropOrUpload": "$t(gallery.drop) またはアップロード",
|
||||
"deleteImage": "画像を削除",
|
||||
"deleteImage_other": "画像を削除",
|
||||
"deleteImageBin": "削除された画像はOSのゴミ箱に送られます。",
|
||||
"deleteImagePermanent": "削除された画像は復元できません。",
|
||||
"download": "ダウンロード",
|
||||
@ -570,7 +570,6 @@
|
||||
"pauseSucceeded": "処理が一時停止されました",
|
||||
"queueFront": "キューの先頭へ追加",
|
||||
"queueBack": "キューに追加",
|
||||
"queueCountPrediction": "{{promptsCount}} プロンプト × {{iterations}} イテレーション -> {{count}} 枚生成",
|
||||
"pause": "一時停止",
|
||||
"queue": "キュー",
|
||||
"pauseTooltip": "処理を一時停止",
|
||||
|
@ -82,7 +82,7 @@
|
||||
"drop": "드랍",
|
||||
"problemDeletingImages": "이미지 삭제 중 발생한 문제",
|
||||
"downloadSelection": "선택 항목 다운로드",
|
||||
"deleteImage": "이미지 삭제",
|
||||
"deleteImage_other": "이미지 삭제",
|
||||
"currentlyInUse": "이 이미지는 현재 다음 기능에서 사용되고 있습니다:",
|
||||
"dropOrUpload": "$t(gallery.drop) 또는 업로드",
|
||||
"copy": "복사",
|
||||
@ -505,7 +505,6 @@
|
||||
"completed": "완성된",
|
||||
"queueBack": "Queue에 추가",
|
||||
"cancelFailed": "항목 취소 중 발생한 문제",
|
||||
"queueCountPrediction": "Queue에 {{predicted}} 추가",
|
||||
"batchQueued": "Batch Queued",
|
||||
"pauseFailed": "프로세서 중지 중 발생한 문제",
|
||||
"clearFailed": "Queue 제거 중 발생한 문제",
|
||||
|
@ -42,7 +42,8 @@
|
||||
"autoSwitchNewImages": "Wissel autom. naar nieuwe afbeeldingen",
|
||||
"loadMore": "Laad meer",
|
||||
"noImagesInGallery": "Geen afbeeldingen om te tonen",
|
||||
"deleteImage": "Verwijder afbeelding",
|
||||
"deleteImage_one": "Verwijder afbeelding",
|
||||
"deleteImage_other": "",
|
||||
"deleteImageBin": "Verwijderde afbeeldingen worden naar de prullenbak van je besturingssysteem gestuurd.",
|
||||
"deleteImagePermanent": "Verwijderde afbeeldingen kunnen niet worden hersteld.",
|
||||
"assets": "Eigen onderdelen",
|
||||
@ -382,8 +383,6 @@
|
||||
"useCpuNoise": "Gebruik CPU-ruis",
|
||||
"imageActions": "Afbeeldingshandeling",
|
||||
"iterations": "Iteraties",
|
||||
"iterationsWithCount_one": "{{count}} iteratie",
|
||||
"iterationsWithCount_other": "{{count}} iteraties",
|
||||
"coherenceMode": "Modus"
|
||||
},
|
||||
"settings": {
|
||||
@ -939,7 +938,6 @@
|
||||
"completed": "Voltooid",
|
||||
"queueBack": "Voeg toe aan wachtrij",
|
||||
"cancelFailed": "Fout bij annuleren onderdeel",
|
||||
"queueCountPrediction": "Voeg {{predicted}} toe aan wachtrij",
|
||||
"batchQueued": "Reeks in wachtrij geplaatst",
|
||||
"pauseFailed": "Fout bij onderbreken verwerker",
|
||||
"clearFailed": "Fout bij wissen van wachtrij",
|
||||
|
@ -76,7 +76,18 @@
|
||||
"localSystem": "Локальная система",
|
||||
"aboutDesc": "Используя Invoke для работы? Проверьте это:",
|
||||
"add": "Добавить",
|
||||
"loglevel": "Уровень логов"
|
||||
"loglevel": "Уровень логов",
|
||||
"beta": "Бета",
|
||||
"selected": "Выбрано",
|
||||
"positivePrompt": "Позитивный запрос",
|
||||
"negativePrompt": "Негативный запрос",
|
||||
"editor": "Редактор",
|
||||
"goTo": "Перейти к",
|
||||
"tab": "Вкладка",
|
||||
"viewing": "Просмотр",
|
||||
"editing": "Редактирование",
|
||||
"viewingDesc": "Просмотр изображений в режиме большой галереи",
|
||||
"editingDesc": "Редактировать на холсте слоёв управления"
|
||||
},
|
||||
"gallery": {
|
||||
"galleryImageSize": "Размер изображений",
|
||||
@ -86,7 +97,9 @@
|
||||
"noImagesInGallery": "Изображений нет",
|
||||
"deleteImagePermanent": "Удаленные изображения невозможно восстановить.",
|
||||
"deleteImageBin": "Удаленные изображения будут отправлены в корзину вашей операционной системы.",
|
||||
"deleteImage": "Удалить изображение",
|
||||
"deleteImage_one": "Удалить изображение",
|
||||
"deleteImage_few": "Удалить {{count}} изображения",
|
||||
"deleteImage_many": "Удалить {{count}} изображений",
|
||||
"assets": "Ресурсы",
|
||||
"autoAssignBoardOnClick": "Авто-назначение доски по клику",
|
||||
"deleteSelection": "Удалить выделенное",
|
||||
@ -334,6 +347,10 @@
|
||||
"remixImage": {
|
||||
"desc": "Используйте все параметры, кроме сида из текущего изображения",
|
||||
"title": "Ремикс изображения"
|
||||
},
|
||||
"toggleViewer": {
|
||||
"title": "Переключить просмотр изображений",
|
||||
"desc": "Переключение между средством просмотра изображений и рабочей областью для текущей вкладки."
|
||||
}
|
||||
},
|
||||
"modelManager": {
|
||||
@ -510,7 +527,8 @@
|
||||
"missingNodeTemplate": "Отсутствует шаблон узла",
|
||||
"missingFieldTemplate": "Отсутствует шаблон поля",
|
||||
"addingImagesTo": "Добавление изображений в",
|
||||
"invoke": "Создать"
|
||||
"invoke": "Создать",
|
||||
"imageNotProcessedForControlAdapter": "Изображение адаптера контроля №{{number}} не обрабатывается"
|
||||
},
|
||||
"isAllowedToUpscale": {
|
||||
"useX2Model": "Изображение слишком велико для увеличения с помощью модели x4. Используйте модель x2",
|
||||
@ -521,9 +539,6 @@
|
||||
"useCpuNoise": "Использовать шум CPU",
|
||||
"imageActions": "Действия с изображениями",
|
||||
"iterations": "Кол-во",
|
||||
"iterationsWithCount_one": "{{count}} Интеграция",
|
||||
"iterationsWithCount_few": "{{count}} Итерации",
|
||||
"iterationsWithCount_many": "{{count}} Итераций",
|
||||
"useSize": "Использовать размер",
|
||||
"coherenceMode": "Режим",
|
||||
"aspect": "Соотношение",
|
||||
@@ -539,7 +554,10 @@
"infillMosaicTileHeight": "Высота плиток",
"infillMosaicMinColor": "Мин цвет",
"infillMosaicMaxColor": "Макс цвет",
"infillColorValue": "Цвет заливки"
"infillColorValue": "Цвет заливки",
"globalSettings": "Глобальные настройки",
"globalNegativePromptPlaceholder": "Глобальный негативный запрос",
"globalPositivePromptPlaceholder": "Глобальный запрос"
},
"settings": {
"models": "Модели",
@@ -704,7 +722,9 @@
"coherenceModeBoxBlur": "коробчатое размытие",
"discardCurrent": "Отбросить текущее",
"invertBrushSizeScrollDirection": "Инвертировать прокрутку для размера кисти",
"initialFitImageSize": "Подогнать размер изображения при перебросе"
"initialFitImageSize": "Подогнать размер изображения при перебросе",
"hideBoundingBox": "Скрыть ограничительную рамку",
"showBoundingBox": "Показать ограничительную рамку"
},
"accessibility": {
"uploadImage": "Загрузить изображение",
@@ -847,7 +867,10 @@
"editMode": "Открыть в редакторе узлов",
"resetToDefaultValue": "Сбросить к стандартному значкнию",
"edit": "Редактировать",
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений."
"noFieldsViewMode": "В этом рабочем процессе нет выбранных полей для отображения. Просмотрите полный рабочий процесс для настройки значений.",
"graph": "График",
"showEdgeLabels": "Показать метки на ребрах",
"showEdgeLabelsHelp": "Показать метки на ребрах, указывающие на соединенные узлы"
},
"controlnet": {
"amult": "a_mult",
@@ -915,8 +938,8 @@
"lineartAnime": "Контурный рисунок в стиле аниме",
"mediapipeFaceDescription": "Обнаружение лиц с помощью Mediapipe",
"hedDescription": "Целостное обнаружение границ",
"setControlImageDimensions": "Установите размеры контрольного изображения на Ш/В",
"scribble": "каракули",
"setControlImageDimensions": "Скопируйте размер в Ш/В (оптимизируйте для модели)",
"scribble": "Штрихи",
"maxFaces": "Макс Лица",
"mlsdDescription": "Минималистичный детектор отрезков линии",
"resizeSimple": "Изменить размер (простой)",
@@ -931,7 +954,18 @@
"small": "Маленький",
"body": "Тело",
"hands": "Руки",
"selectCLIPVisionModel": "Выбрать модель CLIP Vision"
"selectCLIPVisionModel": "Выбрать модель CLIP Vision",
"ipAdapterMethod": "Метод",
"full": "Всё",
"mlsd": "M-LSD",
"h": "H",
"style": "Только стиль",
"dwOpenpose": "DW Openpose",
"pidi": "PIDI",
"composition": "Только композиция",
"hed": "HED",
"beginEndStepPercentShort": "Начало/конец %",
"setControlImageDimensionsForce": "Скопируйте размер в Ш/В (игнорируйте модель)"
},
"boards": {
"autoAddBoard": "Авто добавление Доски",
@@ -1310,6 +1344,12 @@
"paragraphs": [
"Плавно укладывайте изображение вдоль вертикальной оси."
]
},
"ipAdapterMethod": {
"heading": "Метод",
"paragraphs": [
"Метод, с помощью которого применяется текущий IP-адаптер."
]
}
},
"metadata": {
@@ -1357,7 +1397,6 @@
"completed": "Выполнено",
"queueBack": "Добавить в очередь",
"cancelFailed": "Проблема с отменой элемента",
"queueCountPrediction": "{{promptsCount}} запросов × {{iterations}} изображений -> {{count}} генераций",
"batchQueued": "Пакетная очередь",
"pauseFailed": "Проблема с приостановкой рендеринга",
"clearFailed": "Проблема с очисткой очереди",
@@ -1473,7 +1512,11 @@
"projectWorkflows": "Рабочие процессы проекта",
"defaultWorkflows": "Стандартные рабочие процессы",
"name": "Имя",
"noRecentWorkflows": "Нет последних рабочих процессов"
"noRecentWorkflows": "Нет последних рабочих процессов",
"loadWorkflow": "Рабочий процесс $t(common.load)",
"convertGraph": "Конвертировать график",
"loadFromGraph": "Загрузка рабочего процесса из графика",
"autoLayout": "Автоматическое расположение"
},
"hrf": {
"enableHrf": "Включить исправление высокого разрешения",
@@ -1526,5 +1569,56 @@
"addPromptTrigger": "Добавить триггер запроса",
"compatibleEmbeddings": "Совместимые встраивания",
"noMatchingTriggers": "Нет соответствующих триггеров"
},
"controlLayers": {
"moveToBack": "На задний план",
"moveForward": "Переместить вперёд",
"moveBackward": "Переместить назад",
"brushSize": "Размер кисти",
"controlLayers": "Слои управления",
"globalMaskOpacity": "Глобальная непрозрачность маски",
"autoNegative": "Авто негатив",
"deletePrompt": "Удалить запрос",
"resetRegion": "Сбросить регион",
"debugLayers": "Слои отладки",
"rectangle": "Прямоугольник",
"maskPreviewColor": "Цвет предпросмотра маски",
"addNegativePrompt": "Добавить $t(common.negativePrompt)",
"regionalGuidance": "Региональная точность",
"opacity": "Непрозрачность",
"globalControlAdapter": "Глобальный $t(controlnet.controlAdapter_one)",
"globalControlAdapterLayer": "Глобальный $t(controlnet.controlAdapter_one) $t(unifiedCanvas.layer)",
"globalIPAdapter": "Глобальный $t(common.ipAdapter)",
"globalIPAdapterLayer": "Глобальный $t(common.ipAdapter) $t(unifiedCanvas.layer)",
"opacityFilter": "Фильтр непрозрачности",
"deleteAll": "Удалить всё",
"addLayer": "Добавить слой",
"moveToFront": "На передний план",
"toggleVisibility": "Переключить видимость слоя",
|
||||
"addPositivePrompt": "Добавить $t(common.positivePrompt)",
|
||||
"addIPAdapter": "Добавить $t(common.ipAdapter)",
|
||||
"regionalGuidanceLayer": "$t(controlLayers.regionalGuidance) $t(unifiedCanvas.layer)",
|
||||
"resetProcessor": "Сброс процессора по умолчанию",
|
||||
"clearProcessor": "Чистый процессор",
|
||||
"globalInitialImage": "Глобальное исходное изображение",
|
||||
"globalInitialImageLayer": "$t(controlLayers.globalInitialImage) $t(unifiedCanvas.layer)",
|
||||
"noLayersAdded": "Без слоев",
|
||||
"layers_one": "Слой",
|
||||
"layers_few": "Слоя",
|
||||
"layers_many": "Слоев"
|
||||
},
|
||||
"ui": {
|
||||
"tabs": {
|
||||
"generation": "Генерация",
|
||||
"canvas": "Холст",
|
||||
"workflowsTab": "$t(ui.tabs.workflows) $t(common.tab)",
|
||||
"models": "Модели",
|
||||
"generationTab": "$t(ui.tabs.generation) $t(common.tab)",
|
||||
"workflows": "Рабочие процессы",
|
||||
"canvasTab": "$t(ui.tabs.canvas) $t(common.tab)",
|
||||
"queueTab": "$t(ui.tabs.queue) $t(common.tab)",
|
||||
"modelsTab": "$t(ui.tabs.models) $t(common.tab)",
|
||||
"queue": "Очередь"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
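The locale entries above rely on two i18next conventions that are easy to miss in a raw diff: `$t(...)` nesting (e.g. `"loadWorkflow": "Рабочий процесс $t(common.load)"`) and CLDR plural suffixes (`_one`/`_few`/`_many` for Russian, `_other` in the Turkish and Chinese files below). The following minimal sketch is not code from this repository; it assumes i18next v21+ with its default nesting and Intl.PluralRules-based plural resolution, and copies a few values from the diff above (the `common.load` value is assumed for the example).

```ts
// Minimal sketch (not repository code) of how i18next resolves the
// $t(...) nesting and _one/_few/_many plural keys used in the ru locale above.
import i18next from 'i18next';

await i18next.init({
  lng: 'ru',
  resources: {
    ru: {
      translation: {
        common: { load: 'Загрузить' }, // value assumed for the sketch
        workflows: { loadWorkflow: 'Рабочий процесс $t(common.load)' },
        gallery: {
          deleteImage_one: 'Удалить изображение',
          deleteImage_few: 'Удалить {{count}} изображения',
          deleteImage_many: 'Удалить {{count}} изображений',
        },
      },
    },
  },
});

// Nesting: $t(common.load) is substituted at lookup time.
console.log(i18next.t('workflows.loadWorkflow')); // "Рабочий процесс Загрузить"

// Russian plural categories pick the _one/_few/_many variant from `count`.
console.log(i18next.t('gallery.deleteImage', { count: 1 })); // "Удалить изображение"
console.log(i18next.t('gallery.deleteImage', { count: 3 })); // "Удалить 3 изображения"
console.log(i18next.t('gallery.deleteImage', { count: 7 })); // "Удалить 7 изображений"
```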
@@ -298,7 +298,8 @@
"noImagesInGallery": "Gösterilecek Görsel Yok",
"autoSwitchNewImages": "Yeni Görseli Biter Bitmez Gör",
"currentlyInUse": "Bu görsel şurada kullanımda:",
"deleteImage": "Görseli Sil",
"deleteImage_one": "Görseli Sil",
"deleteImage_other": "",
"loadMore": "Daha Getir",
"setCurrentImage": "Çalışma Görseli Yap",
"unableToLoad": "Galeri Yüklenemedi",
@@ -66,7 +66,7 @@
"saveAs": "保存为",
"ai": "ai",
"or": "或",
"aboutDesc": "使用 Invoke 工作?查看:",
"aboutDesc": "使用 Invoke 工作?来看看:",
"add": "添加",
"loglevel": "日志级别",
"copy": "复制",
@@ -78,7 +78,7 @@
"autoSwitchNewImages": "自动切换到新图像",
"loadMore": "加载更多",
"noImagesInGallery": "无图像可用于显示",
"deleteImage": "删除图片",
"deleteImage_other": "删除图片",
"deleteImageBin": "被删除的图片会发送到你操作系统的回收站。",
"deleteImagePermanent": "删除的图片无法被恢复。",
"assets": "素材",
@@ -445,7 +445,6 @@
"useX2Model": "图像太大,无法使用 x4 模型,使用 x2 模型作为替代",
"tooLarge": "图像太大无法进行放大,请选择更小的图像"
},
"iterationsWithCount_other": "{{count}} 次迭代生成",
"cfgRescaleMultiplier": "CFG 重缩放倍数",
"useSize": "使用尺寸",
"setToOptimalSize": "优化模型大小",
@@ -853,7 +852,6 @@
"pruneSucceeded": "从队列修剪 {{item_count}} 个已完成的项目",
"notReady": "无法排队",
"batchFailedToQueue": "批次加入队列失败",
"queueCountPrediction": "{{promptsCount}} 提示词 × {{iterations}} 迭代次数 -> {{count}} 次生成",
"batchQueued": "加入队列的批次",
"front": "前",
"pruneTooltip": "修剪 {{item_count}} 个已完成的项目",
@@ -1,3 +1,4 @@
/* eslint-disable no-console */
import fs from 'node:fs';

import openapiTS from 'openapi-typescript';
@@ -21,6 +21,7 @@ import i18n from 'i18n';
import { size } from 'lodash-es';
import { memo, useCallback, useEffect } from 'react';
import { ErrorBoundary } from 'react-error-boundary';
import { useGetOpenAPISchemaQuery } from 'services/api/endpoints/appInfo';

import AppErrorBoundaryFallback from './AppErrorBoundaryFallback';
import PreselectedImage from './PreselectedImage';
@@ -46,6 +47,7 @@ const App = ({ config = DEFAULT_CONFIG, selectedImage }: Props) => {
useSocketIO();
useGlobalModifiersInit();
useGlobalHotkeys();
useGetOpenAPISchemaQuery();

const { dropzone, isHandlingUpload, setIsHandlingUpload } = useFullscreenDropzone();
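The App.tsx hunk above wires `useGetOpenAPISchemaQuery()` into the root component so the schema is fetched and cached as soon as the app mounts. For context, this is the usual RTK Query pattern: the hook is auto-generated from an endpoint definition. A hedged sketch follows; the endpoint name matches the diff, but the `baseUrl` and schema path are assumptions for illustration, not taken from the repository.

```ts
// Hedged sketch of how a hook like useGetOpenAPISchemaQuery is typically
// produced by RTK Query. baseUrl and the query path are assumed.
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react';

export const appInfoApi = createApi({
  reducerPath: 'appInfoApi',
  baseQuery: fetchBaseQuery({ baseUrl: '/api/v1/' }),
  endpoints: (build) => ({
    // Calling the generated hook with no arguments (as in App above)
    // subscribes the component and triggers the fetch on mount.
    getOpenAPISchema: build.query<unknown, void>({
      query: () => 'app/openapi.json',
    }),
  }),
});

export const { useGetOpenAPISchemaQuery } = appInfoApi;
```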
@@ -67,6 +67,8 @@ export const useSocketIO = () => {

if ($isDebugging.get() || import.meta.env.MODE === 'development') {
window.$socketOptions = $socketOptions;
// This is only enabled manually for debugging, console is allowed.
/* eslint-disable-next-line no-console */
console.log('Socket initialized', socket);
}

@@ -75,6 +77,8 @@
return () => {
if ($isDebugging.get() || import.meta.env.MODE === 'development') {
window.$socketOptions = undefined;
// This is only enabled manually for debugging, console is allowed.
/* eslint-disable-next-line no-console */
console.log('Socket teardown', socket);
}
socket.disconnect();
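The two hunks above gate console output and the `window.$socketOptions` debug handle behind a debug flag or development mode, with a per-line eslint exception for each deliberate `console.log`. A generic sketch of the same pattern is below; all names are hypothetical (not from the repository) and it assumes a Vite build so that `import.meta.env.MODE` exists.

```ts
// Illustrative sketch of the pattern in the diff above: expose a debug handle
// only in development, log with an explicit per-line eslint exception, and
// clean it up when the effect tears down. `$myDebugHandle` is hypothetical.
import { useEffect } from 'react';

declare global {
  interface Window {
    $myDebugHandle?: unknown;
  }
}

export const useDebugHandle = (handle: unknown) => {
  useEffect(() => {
    if (import.meta.env.MODE === 'development') {
      window.$myDebugHandle = handle;
      // eslint-disable-next-line no-console
      console.log('Debug handle attached', handle);
    }
    return () => {
      if (import.meta.env.MODE === 'development') {
        window.$myDebugHandle = undefined;
        // eslint-disable-next-line no-console
        console.log('Debug handle detached');
      }
    };
  }, [handle]);
};
```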
@@ -20,14 +20,14 @@ export type LoggerNamespace =
| 'models'
| 'config'
| 'canvas'
| 'txt2img'
| 'img2img'
| 'generation'
| 'nodes'
| 'system'
| 'socketio'
| 'session'
| 'queue'
| 'dnd';
| 'dnd'
| 'controlLayers';

export const logger = (namespace: LoggerNamespace) => $logger.get().child({ namespace });
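This hunk adds a 'controlLayers' member to the LoggerNamespace union, so the `logger()` factory shown above can hand out a child logger scoped to that namespace. A brief usage sketch follows; the import path and the Roarr-style `.debug` method are assumptions, not confirmed by this diff.

```ts
// Illustrative only: consuming the new 'controlLayers' namespace through the
// logger factory shown above. Import path and log method are assumed.
import { logger } from 'app/logging/logger';

const log = logger('controlLayers');
log.debug('Regional guidance layer added'); // emitted with { namespace: 'controlLayers' } context
```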
@@ -1,3 +1,6 @@
/* eslint-disable no-console */
// This is only enabled manually for debugging, console is allowed.

import type { Middleware, MiddlewareAPI } from '@reduxjs/toolkit';
import { diff } from 'jsondiffpatch';
Some files were not shown because too many files have changed in this diff.