Compare commits
883 Commits
v2.3.0
...
Model-Add-
Author | SHA1 | Date | |
---|---|---|---|
054e963bef | |||
afb66a7884 | |||
25ae36ceb5 | |||
3ae8daedaa | |||
b913e1e11e | |||
3c4b6d5735 | |||
e6123eac19 | |||
30ca25897e | |||
abaee6b9ed | |||
4d7c9e1ab7 | |||
cc5687f26c | |||
78e76f26f9 | |||
9a7580dedd | |||
dc2da8cff4 | |||
019a9f0329 | |||
fe5d9ad171 | |||
dbc0093b31 | |||
92e512b8b6 | |||
dc14701d20 | |||
737e0f3085 | |||
81b7ea4362 | |||
09dfde0ba1 | |||
3ba7e966b5 | |||
a1cd4834d1 | |||
a724038dc6 | |||
4221cf7731 | |||
c34ac91ff0 | |||
5fe38f7c88 | |||
bd7e515290 | |||
076fac07eb | |||
9348161600 | |||
dac3c158a5 | |||
17d8bbf330 | |||
9344687a56 | |||
cf534d735c | |||
501924bc60 | |||
d117251747 | |||
6ea61a8486 | |||
e4d903af20 | |||
2d9797da35 | |||
07ea806553 | |||
5ac0316c62 | |||
9536ba22af | |||
5503749085 | |||
9bfe2fa371 | |||
d8ce6e4426 | |||
43d2d6d98c | |||
64c233efd4 | |||
2245a4e117 | |||
9ceec40b76 | |||
0f13b90059 | |||
d91fc16ae4 | |||
bc01a96f9d | |||
85b2822f5e | |||
c33d8694bb | |||
685bd027f0 | |||
f592d620d5 | |||
2b127b73ac | |||
8855902cfe | |||
9d8ddc6a08 | |||
4ca5189e73 | |||
873597cb84 | |||
44d742f232 | |||
6e7dbf99f3 | |||
1ba1076888 | |||
cafa108f69 | |||
deeff36e16 | |||
d770b14358 | |||
20414ba4ad | |||
92721a1d45 | |||
f329fddab9 | |||
f2efde27f6 | |||
02c58f22be | |||
f751dcd245 | |||
a97107bd90 | |||
b2ce45a417 | |||
4e0b5d85ba | |||
a958ae5e29 | |||
4d50fbf8dc | |||
485f6e5954 | |||
1f6ce838ba | |||
0dc5773849 | |||
bc347f749c | |||
1b215059e7 | |||
db079a2733 | |||
26f71d3536 | |||
eb7ae2588c | |||
278c14ba2e | |||
74e83dda54 | |||
28c1fca477 | |||
1f0324102a | |||
a782ad092d | |||
eae4eb419a | |||
fb7f38f46e | |||
93d0cae455 | |||
35f6b5d562 | |||
2aefa06ef1 | |||
5906888477 | |||
f22c7d0da6 | |||
93b38707b2 | |||
6ecf53078f | |||
9c93b7cb59 | |||
7789e8319c | |||
7d7a28beb3 | |||
27a113d872 | |||
67f8f222d9 | |||
5347c12fed | |||
b194180f76 | |||
fb30b7d17a | |||
c341dcaa3d | |||
b695a2574b | |||
aa68a326c8 | |||
c2922d5991 | |||
85888030c3 | |||
7cf59c1e60 | |||
9738b0ff69 | |||
3021c78390 | |||
6eeaf8d9fb | |||
fa9afec0c2 | |||
d6862bf8c1 | |||
de01c38bbe | |||
7e811908e0 | |||
5f59f24f92 | |||
e414fcf3fb | |||
079ad8f35a | |||
a4d7e0c78e | |||
e9c2f173c5 | |||
44f489d581 | |||
cb48bbd806 | |||
0a761d7c43 | |||
a0f47aa72e | |||
f9abc6fc85 | |||
d840c597b5 | |||
3ca654d256 | |||
e0e01f6c50 | |||
d9dab1b6c7 | |||
3b2ef6e1a8 | |||
c125a3871a | |||
0996bd5acf | |||
ea77d557da | |||
1b01161ea4 | |||
2230cb9562 | |||
9e0c7c46a2 | |||
be305588d3 | |||
9f994df814 | |||
3062580006 | |||
596ba754b1 | |||
b980e563b9 | |||
7fe2606cb3 | |||
0c3b1fe3c4 | |||
c9ee2e351c | |||
e3aef20f42 | |||
60614badaf | |||
288cee9611 | |||
24aca37538 | |||
b853ceea65 | |||
3ee2798ede | |||
5c5106c14a | |||
c367b21c71 | |||
2eef6df66a | |||
300aa8d86c | |||
727f1638d7 | |||
ee6df5852a | |||
90525b1c43 | |||
bbb95dbc5b | |||
f4b7f80d59 | |||
220f7373c8 | |||
4bb5785f29 | |||
f9a7a7d161 | |||
de94c780d9 | |||
0b9230380c | |||
209a55b681 | |||
dc2f69f5d1 | |||
ad2f1b7b36 | |||
dd2d96a50f | |||
2bff28e305 | |||
d68234d879 | |||
b3babf26a5 | |||
ecca0eff31 | |||
28677f9621 | |||
caecfadf11 | |||
5cf8e3aa53 | |||
76cf2c61db | |||
b4d976f2db | |||
777d127c74 | |||
0678803803 | |||
d2fbc9f5e3 | |||
d81088dff7 | |||
1aaad9336f | |||
1f3c024d9d | |||
74a480f94e | |||
c6e8d3269c | |||
dcb5a3a740 | |||
c0ef546b02 | |||
7a78a83651 | |||
10cbf99310 | |||
b63aefcda9 | |||
6a77634b34 | |||
8ca91b1774 | |||
1c9d9e79d5 | |||
3aa1ee1218 | |||
06aa5a8120 | |||
580f9ecded | |||
270032670a | |||
4f056cdb55 | |||
c14241436b | |||
50b56d6088 | |||
8ec2ae7954 | |||
40d82b29cf | |||
0b953d98f5 | |||
8833d76709 | |||
027b316fd2 | |||
d612f11c11 | |||
250b0ab182 | |||
675dd12b6c | |||
7e76eea059 | |||
f45483e519 | |||
65047bf976 | |||
d586a82a53 | |||
28709961e9 | |||
e9f237f39d | |||
4156bfd810 | |||
fe75b95464 | |||
95954188b2 | |||
63f59201f8 | |||
370e8281b3 | |||
685df33584 | |||
4332c9c7a6 | |||
4a00f1cc74 | |||
7ff77504cb | |||
0d1854e44a | |||
fe6858f2d9 | |||
12c7db3a16 | |||
3ecdec02bf | |||
d6c24d59b0 | |||
bb3d1bb6cb | |||
14c8738a71 | |||
1a829bb998 | |||
9d339e94f2 | |||
ad7b1fa6fb | |||
42355b70c2 | |||
faa2558e2f | |||
081397737b | |||
55d36eaf4f | |||
26cd1728ac | |||
a0065da4a4 | |||
c11e823ff3 | |||
197e50a298 | |||
507e12520e | |||
2cc04de397 | |||
f4150a7829 | |||
5418bd3b24 | |||
76d5fa4694 | |||
386dda8233 | |||
8076c1697c | |||
65fc9a6e0e | |||
cde0b6ae8d | |||
b12760b976 | |||
b679a6ba37 | |||
2f5f08c35d | |||
8f48c14ed4 | |||
5d37fa6e36 | |||
f51581bd1b | |||
50ca6b6ffc | |||
63b9ec4c5e | |||
b115bc4247 | |||
dadc30f795 | |||
111d8391e2 | |||
1157b454b2 | |||
8a6473610b | |||
ea7911be89 | |||
9ee648e0c3 | |||
543682fd3b | |||
88cb63e4a1 | |||
76212d1cca | |||
a8df9e5122 | |||
2db180d909 | |||
b716fe8f06 | |||
69e2dc0404 | |||
a38b75572f | |||
e18de761b6 | |||
816ea39827 | |||
1cd4cdd0e5 | |||
768e969c90 | |||
57db66634d | |||
87789c1de8 | |||
c3c1511ec6 | |||
6b41127421 | |||
d232a439f7 | |||
c04f21e83e | |||
8762069b37 | |||
d9ebdd2684 | |||
3e4c10ef9c | |||
17eb2ca5a2 | |||
63725d7534 | |||
00f30ea457 | |||
1b2a3c7144 | |||
01a1777370 | |||
32945c7f45 | |||
b0b8846430 | |||
fdb146a43a | |||
42c1f1fc9d | |||
89a8ef86b5 | |||
f0fb767f57 | |||
4bd93464bf | |||
3d3de82ca9 | |||
c3ff9e6be8 | |||
21f79e5919 | |||
0342e25c74 | |||
91f982fb0b | |||
b9ab43a4bb | |||
6e0e48bf8a | |||
dcc8313dbf | |||
bf5831faa3 | |||
5eff035f55 | |||
7c60068388 | |||
d843fb078a | |||
41b2e4633f | |||
57144ac0cf | |||
a305b6adbf | |||
94daaa4abf | |||
901337186d | |||
7e2f64f60b | |||
126cba2324 | |||
2f9dcd7906 | |||
e537b5d8e1 | |||
e0e70c9222 | |||
1b21e5df54 | |||
4b76af37ae | |||
486c445afb | |||
4547c48013 | |||
8f21201c91 | |||
532b74a206 | |||
0b184913b9 | |||
97719e40e4 | |||
5ad3062b66 | |||
92d012a92d | |||
fc187f263e | |||
fd94f85abe | |||
4e9e1b660d | |||
d01adedff5 | |||
c247f430f7 | |||
3d6a358042 | |||
4d1dcd11de | |||
b33655b0d6 | |||
81dee04dc9 | |||
114018e3e6 | |||
ef8cf83b28 | |||
633857b0e3 | |||
214574d11f | |||
8584665ade | |||
516c56d0c5 | |||
5891b43ce2 | |||
62e75f95aa | |||
b07621e27e | |||
545d8968fd | |||
7cf2f58513 | |||
618e3e5e91 | |||
c703b60986 | |||
7c0ce5c282 | |||
82fe34b1f7 | |||
65f9aae81d | |||
2d9fac23e7 | |||
ebc4b52f41 | |||
c4e6d4b348 | |||
eab32bce6c | |||
55d2094094 | |||
a0d50a2b23 | |||
9efeb1b2ec | |||
86e2cb0428 | |||
53c2c0f91d | |||
bdc7b8b75a | |||
1bfdd54810 | |||
b4bf6c12a5 | |||
ab35c241c2 | |||
b3dccfaeb6 | |||
6477e31c1e | |||
dd4a1c998b | |||
70203e6e5a | |||
d778a7c5ca | |||
f8e59636cd | |||
2d1a0b0a05 | |||
c9b2234d90 | |||
82b224539b | |||
0b15ffb95b | |||
ce9aaab22f | |||
3f53f1186d | |||
c0aff396d2 | |||
955900507f | |||
d606abc544 | |||
44400d2a66 | |||
60a98cacef | |||
6a990565ff | |||
3f0b0f3250 | |||
1a7371ea17 | |||
850d1ee984 | |||
2c7928b163 | |||
87d1ec6a4c | |||
53c62537f7 | |||
418d93fdfd | |||
f2ce2f1778 | |||
5b6c61fc75 | |||
1d77581d96 | |||
3b921cf393 | |||
d334f7f1f6 | |||
8c9764476c | |||
b7d5a3e0b5 | |||
e0405031a7 | |||
ee24b686b3 | |||
835eb14c79 | |||
9aadf7abc1 | |||
243f9e8377 | |||
6e0c6d9cc9 | |||
a3076cf951 | |||
6696882c71 | |||
17b039e85d | |||
81539e6ab4 | |||
92304b9f8a | |||
ec1de5ae8b | |||
49198a61ef | |||
c22d529528 | |||
8c5773abc1 | |||
cd98d88fe7 | |||
34e3aa1f88 | |||
49ffb64ef3 | |||
ec14e2db35 | |||
5725fcb3e0 | |||
1447b6df96 | |||
e700da23d8 | |||
b4ed8bc47a | |||
bd85e00530 | |||
4e446130d8 | |||
4c93b514bb | |||
d078941316 | |||
230d3a496d | |||
ec2890c19b | |||
a540cc537f | |||
39c57aa358 | |||
01f8c37bd3 | |||
2d990c1f54 | |||
7fb2da8741 | |||
b7718985d5 | |||
c69fcb1c10 | |||
90cda11868 | |||
0982548e1f | |||
5cb877e096 | |||
11a29fdc4d | |||
24407048a5 | |||
a7c2333312 | |||
b5b541c747 | |||
ad6ea02c9c | |||
1a6ed85d99 | |||
a094bbd839 | |||
73dda812ea | |||
8eaf1c4033 | |||
4f44b64052 | |||
c559bf3e10 | |||
a485515bc6 | |||
2c9b29725b | |||
28612c899a | |||
88acbeaa35 | |||
46729efe95 | |||
b3d03e1146 | |||
e29c9a7d9e | |||
9b157b6532 | |||
10a1e7962b | |||
cb672d7d00 | |||
e791fb6b0b | |||
1c9001ad21 | |||
3083356cf0 | |||
179814e50a | |||
9515c07fca | |||
a45e94fde7 | |||
8b6196e0a2 | |||
ee2c0ab51b | |||
ca5f129902 | |||
cf2eca7c60 | |||
16aea1e869 | |||
75ff6cd3c3 | |||
7b7b31637c | |||
fca564c18a | |||
eb8d87e185 | |||
dbadb1d7b5 | |||
a4afb69615 | |||
8b7925edf3 | |||
168a51c5a6 | |||
3f5d8c3e44 | |||
609bb19573 | |||
d561d6d3dd | |||
7ffaa17551 | |||
97eac58a50 | |||
cedbe8fcd7 | |||
a461875abd | |||
ab018ccdfe | |||
d41dcdfc46 | |||
972aecc4c5 | |||
6b7be4e5dc | |||
9b1a7b553f | |||
7f99efc5df | |||
0a6d8b4855 | |||
5e41811fb5 | |||
5a4967582e | |||
1d0ba4a1a7 | |||
4878c7a2d5 | |||
9e5aa645a7 | |||
d01e23973e | |||
71bbd78574 | |||
fff41a7349 | |||
d5f524a156 | |||
3ab9d02883 | |||
27a2e27c3a | |||
da04b11a31 | |||
3795b40f63 | |||
9436f2e3d1 | |||
7fadd5e5c4 | |||
4c2a588e1f | |||
5f9de762ff | |||
91f7abb398 | |||
6420b81a5d | |||
b6ed5eafd6 | |||
694d5aa2e8 | |||
833079140b | |||
fd27948c36 | |||
1dfaaa2a57 | |||
bac6b50dd1 | |||
a30c91f398 | |||
17294bfa55 | |||
3fa1771cc9 | |||
f3bd386ff0 | |||
8486ce31de | |||
1d9845557f | |||
55dce6cfdd | |||
58be915446 | |||
dc9268f772 | |||
47ddc00c6a | |||
0d22fd59ed | |||
d5efd57c28 | |||
b52a92da7e | |||
b949162e7e | |||
5409991256 | |||
be1bcbc173 | |||
d6196e863d | |||
63e790b79b | |||
cf53bba99e | |||
ed4c8f6a8a | |||
aab8263c31 | |||
b21bd6f428 | |||
cb6903dfd0 | |||
cd87ca8214 | |||
58e5bf5a58 | |||
f17c7ca6f7 | |||
c3dd28cff9 | |||
db4e1e8b53 | |||
3e43c3e698 | |||
cc7733af1c | |||
2a29734a56 | |||
f2e533f7c8 | |||
078f897b67 | |||
8352ab2076 | |||
1a3d47814b | |||
e852ad0a51 | |||
136cd0e868 | |||
7afe26320a | |||
702da71515 | |||
b313cf8afd | |||
852d78d9ad | |||
5570a88858 | |||
cfd897874b | |||
1249147c57 | |||
eec5c3bbb1 | |||
ca8d9fb885 | |||
7d77fb9691 | |||
a4c0dfb33c | |||
2dded68267 | |||
172ce3dc25 | |||
6c8d4b091e | |||
7beebc3659 | |||
5461318eda | |||
d0abe13b60 | |||
aca9d74489 | |||
a0c213a158 | |||
740210fc99 | |||
ca10d0652f | |||
e1a85d8184 | |||
9d8236c59d | |||
7eafcd47a6 | |||
ded3f13a33 | |||
e5646d7241 | |||
79ac9698c1 | |||
d29f57c93d | |||
9b7cde8918 | |||
8ae71303a5 | |||
2cd7bd4a8e | |||
b813298f2a | |||
58f787f7d4 | |||
2bba543d20 | |||
d3c1b747ee | |||
b9ecf93ba3 | |||
487da8394d | |||
4c93bc56f8 | |||
727dfeae43 | |||
88d561dee7 | |||
7a379f1d4f | |||
3ad89f99d2 | |||
d76c5da514 | |||
da5b0673e7 | |||
d7180afe9d | |||
2e9c15711b | |||
e19b08b149 | |||
234d76a269 | |||
826d941068 | |||
34e449213c | |||
671c5943e4 | |||
16c24ec367 | |||
e8240855e0 | |||
a5e065048e | |||
a53c3269db | |||
8bf93d3a32 | |||
d42cc0fd1c | |||
d2553d783c | |||
10b747d22b | |||
1d567fa593 | |||
3a3dd39d3a | |||
f4b3d7dba2 | |||
de2c7fd372 | |||
b140e1c619 | |||
1308584289 | |||
2ac4778bcf | |||
6101d67dba | |||
3cd50fe3a1 | |||
e683b574d1 | |||
0decd05913 | |||
d01b7ea2d2 | |||
4fa91724d9 | |||
e3d1c64b77 | |||
17f35a7bba | |||
ab2f0a6fbf | |||
41cbf2f7c4 | |||
d5d2e1d7a3 | |||
587faa3e52 | |||
80229ab73e | |||
68b2911d2f | |||
2bf2f627e4 | |||
58676b2ce2 | |||
11f79dc1e1 | |||
2a095ddc8e | |||
dd849d2e91 | |||
8c63fac958 | |||
11a70e9764 | |||
33ce78e4a2 | |||
4f78518858 | |||
fad99ac4d2 | |||
423b592b25 | |||
8aa7d1da55 | |||
6b702c32ca | |||
767012aec0 | |||
2267057e2b | |||
b8212e4dea | |||
5b7e4a5f5d | |||
07f9fa63d0 | |||
1ae8986451 | |||
b305c240de | |||
248dc81ec3 | |||
ebe0071ed2 | |||
7a518218e5 | |||
fc14ac7faa | |||
95e2739c47 | |||
f129393a2e | |||
c55bbd1a85 | |||
ccba41cdb2 | |||
3d442bbf22 | |||
4888d0d832 | |||
47de3fb007 | |||
41bc160cb8 | |||
d0ba155c19 | |||
5f0848bf7d | |||
6551527fe2 | |||
159ce2ea08 | |||
3715570d17 | |||
65a7432b5a | |||
557e28f460 | |||
62a7f252f5 | |||
2fa14200aa | |||
0605cf94f0 | |||
d69156c616 | |||
0963bbbe78 | |||
f3351a5e47 | |||
f3f4c68acc | |||
5d617ce63d | |||
8a0d45ac5a | |||
2468ba7445 | |||
65b7d2db47 | |||
e07f1bb89c | |||
f4f813d108 | |||
6217edcb6c | |||
c5cc832304 | |||
a76038bac4 | |||
ff4942f9b4 | |||
1ccad64871 | |||
19f0022bbe | |||
ecc7b7a700 | |||
e46102124e | |||
314ed7d8f6 | |||
b1341bc611 | |||
07be605dcb | |||
fe318775c3 | |||
1bb07795d8 | |||
caf07479ec | |||
508780d07f | |||
05e67e924c | |||
fb2488314f | |||
062f58209b | |||
7cb9d6b1a6 | |||
fb721234ec | |||
92906aeb08 | |||
cab41f0538 | |||
5d0dcaf81e | |||
9591c8d4e0 | |||
bcb1fbe031 | |||
e87a2fe14b | |||
d00571b5a4 | |||
b08a514594 | |||
265ccaca4a | |||
7aa6c827f7 | |||
093174942b | |||
f299f40763 | |||
7545e38655 | |||
0bc55a0d55 | |||
d38e7170fe | |||
15a9412255 | |||
e29399e032 | |||
bc18a94d8c | |||
5d2bdd478c | |||
9cacba916b | |||
628e82fa79 | |||
fbbbba2fac | |||
9cbf9d52b4 | |||
fb35fe1a41 | |||
b60b5750af | |||
3ff40114fa | |||
71c6ae8789 | |||
d9a7536fa8 | |||
99f4417cd7 | |||
47f94bde04 | |||
197e6b95e3 | |||
8e47ca8d57 | |||
714fff39ba | |||
89239d1c54 | |||
c03d98cf46 | |||
d1ad46d6f1 | |||
6ae7560f66 | |||
e561d19206 | |||
9eed1919c2 | |||
b87f7b1129 | |||
7410a60208 | |||
7c86130a3d | |||
58a1d9aae0 | |||
24e32f6ae2 | |||
3dd7393984 | |||
f18f743d03 | |||
c660dcdfcd | |||
9e0250c0b4 | |||
08c747f1e0 | |||
04ae6fde80 | |||
b1a53c8ef0 | |||
cd64511f24 | |||
1e98e0b159 | |||
4f7af55bc3 | |||
d0e6a57e48 | |||
d28a486769 | |||
84722d92f6 | |||
8a3b5ac21d | |||
717d53a773 | |||
96926d6648 | |||
f3639de8b1 | |||
b71e675e8d | |||
d3c850104b | |||
c00155f6a4 | |||
8753070fc7 | |||
ed8f9f021d | |||
3ccc705396 | |||
11e422cf29 | |||
7f695fed39 | |||
310501cd8a | |||
106b3aea1b | |||
6e52ca3307 | |||
94c31f672f | |||
240bbb9852 | |||
8cf2ed91a9 | |||
7be5b4ca8b | |||
d589ad96aa | |||
097e41e8d2 | |||
4cf43b858d | |||
13a4666a6e | |||
9232290950 | |||
f3153d45bc | |||
d9cb6da951 | |||
17535d887f | |||
35da7f5b96 | |||
4e95a68582 | |||
9dfeb93f80 | |||
02247ffc79 | |||
48da030415 | |||
817e04bee0 | |||
e5d0b0c37d | |||
950f450665 | |||
79daf8b039 | |||
383cbca896 | |||
07c55d5e2a | |||
156151df45 | |||
03b1d71af9 | |||
f6ad107fdd | |||
e2c392631a | |||
4a1b4d63ef | |||
83ecda977c | |||
9601febef8 | |||
0503680efa | |||
57ccec1df3 | |||
22f3634481 | |||
5590c73af2 | |||
1f76b30e54 | |||
8bd04654c7 | |||
0dce3188cc | |||
106c7aa956 | |||
b04f199035 | |||
a2b992dfd1 | |||
745e253a78 | |||
2ea551d37d | |||
8d1481ca10 | |||
307e7e00c2 | |||
c3ad1c8a9f | |||
05d51d7b5b | |||
09f69a4d28 | |||
a338af17c8 | |||
bc82fc0cdd | |||
418a3d6e41 | |||
fbcc52ec3d | |||
47e89f4ba1 | |||
888d3ae968 | |||
a28120abdd | |||
4493d83aea | |||
eff0fb9a69 | |||
5bb0f9bedc | |||
bf812e6493 | |||
a3da12d867 | |||
6b4a06c3fc | |||
3833b28132 | |||
e8f9ab82ed | |||
6ab364b16a | |||
a4dc11addc | |||
0372702eb4 | |||
aa8eeea478 | |||
e54ecc4c37 | |||
4a12c76097 | |||
be72faf78e | |||
28d44d80ed | |||
9008d9996f | |||
be2a9b78bb | |||
70003ee5b1 | |||
45a5ccba84 | |||
f80a64a0f4 | |||
511df2963b | |||
f92f62a91b | |||
7f41893da4 | |||
42da4f57c2 | |||
c2e11dfe83 | |||
17e1930229 | |||
bde94347d3 | |||
b1612afff4 | |||
1d10d952b2 | |||
9150f9ef3c | |||
7bc0f7cc6c | |||
c52d11b24c | |||
59486615dd | |||
f0212cd361 | |||
ee4cb5fdc9 | |||
75b919237b | |||
07a9062e1f | |||
cdb3e18b80 | |||
01eb93d664 | |||
89f69c2d94 | |||
dc6f6fcab7 | |||
6ca177e462 |
@ -3,21 +3,23 @@
|
|||||||
!invokeai
|
!invokeai
|
||||||
!ldm
|
!ldm
|
||||||
!pyproject.toml
|
!pyproject.toml
|
||||||
!README.md
|
|
||||||
|
# ignore frontend/web but whitelist dist
|
||||||
|
invokeai/frontend/web/
|
||||||
|
!invokeai/frontend/web/dist/
|
||||||
|
|
||||||
|
# ignore invokeai/assets but whitelist invokeai/assets/web
|
||||||
|
invokeai/assets/
|
||||||
|
!invokeai/assets/web/
|
||||||
|
|
||||||
# Guard against pulling in any models that might exist in the directory tree
|
# Guard against pulling in any models that might exist in the directory tree
|
||||||
**/*.pt*
|
**/*.pt*
|
||||||
**/*.ckpt
|
**/*.ckpt
|
||||||
|
|
||||||
# ignore frontend but whitelist dist
|
# Byte-compiled / optimized / DLL files
|
||||||
invokeai/frontend/**
|
**/__pycache__/
|
||||||
!invokeai/frontend/dist
|
|
||||||
|
|
||||||
# ignore invokeai/assets but whitelist invokeai/assets/web
|
|
||||||
invokeai/assets
|
|
||||||
!invokeai/assets/web
|
|
||||||
|
|
||||||
# ignore python cache
|
|
||||||
**/__pycache__
|
|
||||||
**/*.py[cod]
|
**/*.py[cod]
|
||||||
**/*.egg-info
|
|
||||||
|
# Distribution / packaging
|
||||||
|
**/*.egg-info/
|
||||||
|
**/*.egg
|
||||||
|
1
.git-blame-ignore-revs
Normal file
@ -0,0 +1 @@
|
|||||||
|
b3dccfaeb636599c02effc377cdd8a87d658256c
|
60
.github/CODEOWNERS
vendored
@ -1,50 +1,34 @@
|
|||||||
# continuous integration
|
# continuous integration
|
||||||
/.github/workflows/ @mauwii
|
/.github/workflows/ @mauwii @lstein @blessedcoolant
|
||||||
|
|
||||||
# documentation
|
# documentation
|
||||||
/docs/ @lstein @mauwii @tildebyte
|
/docs/ @lstein @mauwii @tildebyte @blessedcoolant
|
||||||
mkdocs.yml @lstein @mauwii
|
/mkdocs.yml @lstein @mauwii @blessedcoolant
|
||||||
|
|
||||||
|
# nodes
|
||||||
|
/invokeai/app/ @Kyle0654 @blessedcoolant
|
||||||
|
|
||||||
# installation and configuration
|
# installation and configuration
|
||||||
/pyproject.toml @mauwii @lstein @ebr
|
/pyproject.toml @mauwii @lstein @blessedcoolant
|
||||||
/docker/ @mauwii
|
/docker/ @mauwii @lstein @blessedcoolant
|
||||||
/scripts/ @ebr @lstein
|
/scripts/ @ebr @lstein
|
||||||
/installer/ @ebr @lstein @tildebyte
|
/installer/ @lstein @ebr
|
||||||
ldm/invoke/config @lstein @ebr
|
/invokeai/assets @lstein @ebr
|
||||||
invokeai/assets @lstein @ebr
|
/invokeai/configs @lstein
|
||||||
invokeai/configs @lstein @ebr
|
/invokeai/version @lstein @blessedcoolant
|
||||||
/ldm/invoke/_version.py @lstein @blessedcoolant
|
|
||||||
|
|
||||||
# web ui
|
# web ui
|
||||||
/invokeai/frontend @blessedcoolant @psychedelicious
|
/invokeai/frontend @blessedcoolant @psychedelicious @lstein
|
||||||
/invokeai/backend @blessedcoolant @psychedelicious
|
/invokeai/backend @blessedcoolant @psychedelicious @lstein
|
||||||
|
|
||||||
# generation and model management
|
# generation, model management, postprocessing
|
||||||
/ldm/*.py @lstein
|
/invokeai/backend @keturn @damian0815 @lstein @blessedcoolant @jpphoto
|
||||||
/ldm/generate.py @lstein @keturn
|
|
||||||
/ldm/invoke/args.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/ckpt* @lstein
|
|
||||||
/ldm/invoke/ckpt_generator @lstein
|
|
||||||
/ldm/invoke/CLI.py @lstein
|
|
||||||
/ldm/invoke/config @lstein @ebr @mauwii
|
|
||||||
/ldm/invoke/generator @keturn @damian0815
|
|
||||||
/ldm/invoke/globals.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/merge_diffusers.py @lstein
|
|
||||||
/ldm/invoke/model_manager.py @lstein @blessedcoolant
|
|
||||||
/ldm/invoke/txt2mask.py @lstein
|
|
||||||
/ldm/invoke/patchmatch.py @Kyle0654
|
|
||||||
/ldm/invoke/restoration @lstein @blessedcoolant
|
|
||||||
|
|
||||||
# attention, textual inversion, model configuration
|
# front ends
|
||||||
/ldm/models @damian0815 @keturn
|
/invokeai/frontend/CLI @lstein
|
||||||
/ldm/modules @damian0815 @keturn
|
/invokeai/frontend/install @lstein @ebr @mauwii
|
||||||
|
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
|
||||||
|
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
|
||||||
|
/invokeai/frontend/web @psychedelicious @blessedcoolant
|
||||||
|
|
||||||
# Nodes
|
|
||||||
apps/ @Kyle0654
|
|
||||||
|
|
||||||
# legacy REST API
|
|
||||||
# is CapableWeb still engaged?
|
|
||||||
/ldm/invoke/pngwriter.py @CapableWeb
|
|
||||||
/ldm/invoke/server_legacy.py @CapableWeb
|
|
||||||
/scripts/legacy_api.py @CapableWeb
|
|
||||||
/tests/legacy_tests.sh @CapableWeb
|
|
||||||
|
10
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
@ -65,6 +65,16 @@ body:
|
|||||||
placeholder: 8GB
|
placeholder: 8GB
|
||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: version-number
|
||||||
|
attributes:
|
||||||
|
label: What version did you experience this issue on?
|
||||||
|
description: |
|
||||||
|
Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||||
|
placeholder: X.X.X
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: what-happened
|
id: what-happened
|
||||||
|
77
.github/workflows/build-container.yml
vendored
@ -3,9 +3,21 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'main'
|
||||||
- 'update/ci/*'
|
- 'update/ci/docker/*'
|
||||||
|
- 'update/docker/*'
|
||||||
|
- 'dev/ci/docker/*'
|
||||||
|
- 'dev/docker/*'
|
||||||
|
paths:
|
||||||
|
- 'pyproject.toml'
|
||||||
|
- '.dockerignore'
|
||||||
|
- 'invokeai/**'
|
||||||
|
- 'docker/Dockerfile'
|
||||||
tags:
|
tags:
|
||||||
- 'v*.*.*'
|
- 'v*.*.*'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
docker:
|
||||||
@ -14,24 +26,21 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
flavor:
|
flavor:
|
||||||
- amd
|
- rocm
|
||||||
- cuda
|
- cuda
|
||||||
- cpu
|
- cpu
|
||||||
include:
|
include:
|
||||||
- flavor: amd
|
- flavor: rocm
|
||||||
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
||||||
dockerfile: docker/Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
- flavor: cuda
|
- flavor: cuda
|
||||||
pip-extra-index-url: ''
|
pip-extra-index-url: ''
|
||||||
dockerfile: docker/Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
- flavor: cpu
|
- flavor: cpu
|
||||||
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
|
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
|
||||||
dockerfile: docker/Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: ${{ matrix.flavor }}
|
name: ${{ matrix.flavor }}
|
||||||
|
env:
|
||||||
|
PLATFORMS: 'linux/amd64,linux/arm64'
|
||||||
|
DOCKERFILE: 'docker/Dockerfile'
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@ -41,24 +50,27 @@ jobs:
|
|||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v4
|
||||||
with:
|
with:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
images: ghcr.io/${{ github.repository }}
|
images: |
|
||||||
|
ghcr.io/${{ github.repository }}
|
||||||
|
${{ vars.DOCKERHUB_REPOSITORY }}
|
||||||
tags: |
|
tags: |
|
||||||
type=ref,event=branch
|
type=ref,event=branch
|
||||||
type=ref,event=tag
|
type=ref,event=tag
|
||||||
type=semver,pattern={{version}}
|
type=pep440,pattern={{version}}
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
type=pep440,pattern={{major}}.{{minor}}
|
||||||
type=semver,pattern={{major}}
|
type=pep440,pattern={{major}}
|
||||||
type=sha,enable=true,prefix=sha-,format=short
|
type=sha,enable=true,prefix=sha-,format=short
|
||||||
flavor: |
|
flavor: |
|
||||||
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
|
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
|
||||||
suffix=-${{ matrix.flavor }},onlatest=false
|
suffix=-${{ matrix.flavor }},onlatest=false
|
||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v2
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v2
|
||||||
with:
|
with:
|
||||||
platforms: ${{ matrix.platforms }}
|
platforms: ${{ env.PLATFORMS }}
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
@ -68,25 +80,34 @@ jobs:
|
|||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build container
|
- name: Build container
|
||||||
|
id: docker_build
|
||||||
uses: docker/build-push-action@v4
|
uses: docker/build-push-action@v4
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: ${{ matrix.dockerfile }}
|
file: ${{ env.DOCKERFILE }}
|
||||||
platforms: ${{ matrix.platforms }}
|
platforms: ${{ env.PLATFORMS }}
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
|
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
|
||||||
cache-from: type=gha
|
cache-from: |
|
||||||
cache-to: type=gha,mode=max
|
type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
|
||||||
|
type=gha,scope=main-${{ matrix.flavor }}
|
||||||
|
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
|
||||||
|
|
||||||
- name: Output image, digest and metadata to summary
|
- name: Docker Hub Description
|
||||||
run: |
|
if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
|
||||||
{
|
uses: peter-evans/dockerhub-description@v3
|
||||||
echo imageid: "${{ steps.docker_build.outputs.imageid }}"
|
with:
|
||||||
echo digest: "${{ steps.docker_build.outputs.digest }}"
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
echo labels: "${{ steps.meta.outputs.labels }}"
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
echo tags: "${{ steps.meta.outputs.tags }}"
|
repository: ${{ vars.DOCKERHUB_REPOSITORY }}
|
||||||
echo version: "${{ steps.meta.outputs.version }}"
|
short-description: ${{ github.event.repository.description }}
|
||||||
} >> "$GITHUB_STEP_SUMMARY"
|
|
||||||
|
27
.github/workflows/close-inactive-issues.yml
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
name: Close inactive issues
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "00 6 * * *"
|
||||||
|
|
||||||
|
env:
|
||||||
|
DAYS_BEFORE_ISSUE_STALE: 14
|
||||||
|
DAYS_BEFORE_ISSUE_CLOSE: 28
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
close-issues:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/stale@v5
|
||||||
|
with:
|
||||||
|
days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
|
||||||
|
days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
|
||||||
|
stale-issue-label: "Inactive Issue"
|
||||||
|
stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
|
||||||
|
close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
|
||||||
|
days-before-pr-stale: -1
|
||||||
|
days-before-pr-close: -1
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
operations-per-run: 500
|
22
.github/workflows/lint-frontend.yml
vendored
@ -3,14 +3,22 @@ name: Lint frontend
|
|||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- 'invokeai/frontend/**'
|
- 'invokeai/frontend/web/**'
|
||||||
|
types:
|
||||||
|
- 'ready_for_review'
|
||||||
|
- 'opened'
|
||||||
|
- 'synchronize'
|
||||||
push:
|
push:
|
||||||
|
branches:
|
||||||
|
- 'main'
|
||||||
paths:
|
paths:
|
||||||
- 'invokeai/frontend/**'
|
- 'invokeai/frontend/web/**'
|
||||||
|
merge_group:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: invokeai/frontend
|
working-directory: invokeai/frontend/web
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
lint-frontend:
|
lint-frontend:
|
||||||
@ -23,7 +31,7 @@ jobs:
|
|||||||
node-version: '18'
|
node-version: '18'
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- run: 'yarn install --frozen-lockfile'
|
- run: 'yarn install --frozen-lockfile'
|
||||||
- run: 'yarn tsc'
|
- run: 'yarn run lint:tsc'
|
||||||
- run: 'yarn run madge'
|
- run: 'yarn run lint:madge'
|
||||||
- run: 'yarn run lint --max-warnings=0'
|
- run: 'yarn run lint:eslint'
|
||||||
- run: 'yarn run prettier --check'
|
- run: 'yarn run lint:prettier'
|
||||||
|
3
.github/workflows/mkdocs-material.yml
vendored
@ -5,6 +5,9 @@ on:
|
|||||||
- 'main'
|
- 'main'
|
||||||
- 'development'
|
- 'development'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
mkdocs-material:
|
mkdocs-material:
|
||||||
if: github.event.pull_request.draft == false
|
if: github.event.pull_request.draft == false
|
||||||
|
4
.github/workflows/pypi-release.yml
vendored
@ -3,7 +3,7 @@ name: PyPI Release
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- 'ldm/invoke/_version.py'
|
- 'invokeai/version/invokeai_version.py'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@ -28,7 +28,7 @@ jobs:
|
|||||||
run: twine check dist/*
|
run: twine check dist/*
|
||||||
|
|
||||||
- name: check PyPI versions
|
- name: check PyPI versions
|
||||||
if: github.ref == 'refs/heads/main'
|
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
|
||||||
run: |
|
run: |
|
||||||
pip install --upgrade requests
|
pip install --upgrade requests
|
||||||
python -c "\
|
python -c "\
|
||||||
|
66
.github/workflows/test-invoke-pip-skip.yml
vendored
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
name: Test invoke.py pip
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- '**'
|
||||||
|
- '!pyproject.toml'
|
||||||
|
- '!invokeai/**'
|
||||||
|
- 'invokeai/frontend/web/**'
|
||||||
|
merge_group:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
matrix:
|
||||||
|
if: github.event.pull_request.draft == false
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
python-version:
|
||||||
|
# - '3.9'
|
||||||
|
- '3.10'
|
||||||
|
pytorch:
|
||||||
|
# - linux-cuda-11_6
|
||||||
|
- linux-cuda-11_7
|
||||||
|
- linux-rocm-5_2
|
||||||
|
- linux-cpu
|
||||||
|
- macos-default
|
||||||
|
- windows-cpu
|
||||||
|
# - windows-cuda-11_6
|
||||||
|
# - windows-cuda-11_7
|
||||||
|
include:
|
||||||
|
# - pytorch: linux-cuda-11_6
|
||||||
|
# os: ubuntu-22.04
|
||||||
|
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
||||||
|
# github-env: $GITHUB_ENV
|
||||||
|
- pytorch: linux-cuda-11_7
|
||||||
|
os: ubuntu-22.04
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: linux-rocm-5_2
|
||||||
|
os: ubuntu-22.04
|
||||||
|
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: linux-cpu
|
||||||
|
os: ubuntu-22.04
|
||||||
|
extra-index-url: 'https://download.pytorch.org/whl/cpu'
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: macos-default
|
||||||
|
os: macOS-12
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: windows-cpu
|
||||||
|
os: windows-2022
|
||||||
|
github-env: $env:GITHUB_ENV
|
||||||
|
# - pytorch: windows-cuda-11_6
|
||||||
|
# os: windows-2022
|
||||||
|
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
||||||
|
# github-env: $env:GITHUB_ENV
|
||||||
|
# - pytorch: windows-cuda-11_7
|
||||||
|
# os: windows-2022
|
||||||
|
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
|
||||||
|
# github-env: $env:GITHUB_ENV
|
||||||
|
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
steps:
|
||||||
|
- run: 'echo "No build required"'
|
11
.github/workflows/test-invoke-pip.yml
vendored
@ -3,11 +3,20 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'main'
|
||||||
|
paths:
|
||||||
|
- 'pyproject.toml'
|
||||||
|
- 'invokeai/**'
|
||||||
|
- '!invokeai/frontend/web/**'
|
||||||
pull_request:
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- 'pyproject.toml'
|
||||||
|
- 'invokeai/**'
|
||||||
|
- '!invokeai/frontend/web/**'
|
||||||
types:
|
types:
|
||||||
- 'ready_for_review'
|
- 'ready_for_review'
|
||||||
- 'opened'
|
- 'opened'
|
||||||
- 'synchronize'
|
- 'synchronize'
|
||||||
|
merge_group:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
@ -99,7 +108,7 @@ jobs:
|
|||||||
- name: set INVOKEAI_OUTDIR
|
- name: set INVOKEAI_OUTDIR
|
||||||
run: >
|
run: >
|
||||||
python -c
|
python -c
|
||||||
"import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
|
"import os;from invokeai.backend.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
|
||||||
>> ${{ matrix.github-env }}
|
>> ${{ matrix.github-env }}
|
||||||
|
|
||||||
- name: run invokeai-configure
|
- name: run invokeai-configure
|
||||||
|
14
.gitignore
vendored
@ -1,4 +1,5 @@
|
|||||||
# ignore default image save location and model symbolic link
|
# ignore default image save location and model symbolic link
|
||||||
|
.idea/
|
||||||
embeddings/
|
embeddings/
|
||||||
outputs/
|
outputs/
|
||||||
models/ldm/stable-diffusion-v1/model.ckpt
|
models/ldm/stable-diffusion-v1/model.ckpt
|
||||||
@ -62,15 +63,18 @@ pip-delete-this-directory.txt
|
|||||||
htmlcov/
|
htmlcov/
|
||||||
.tox/
|
.tox/
|
||||||
.nox/
|
.nox/
|
||||||
|
.coveragerc
|
||||||
.coverage
|
.coverage
|
||||||
.coverage.*
|
.coverage.*
|
||||||
.cache
|
.cache
|
||||||
nosetests.xml
|
nosetests.xml
|
||||||
coverage.xml
|
coverage.xml
|
||||||
|
cov.xml
|
||||||
*.cover
|
*.cover
|
||||||
*.py,cover
|
*.py,cover
|
||||||
.hypothesis/
|
.hypothesis/
|
||||||
.pytest_cache/
|
.pytest_cache/
|
||||||
|
.pytest.ini
|
||||||
cover/
|
cover/
|
||||||
junit/
|
junit/
|
||||||
|
|
||||||
@ -196,7 +200,7 @@ checkpoints
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
|
|
||||||
# Let the frontend manage its own gitignore
|
# Let the frontend manage its own gitignore
|
||||||
!invokeai/frontend/*
|
!invokeai/frontend/web/*
|
||||||
|
|
||||||
# Scratch folder
|
# Scratch folder
|
||||||
.scratch/
|
.scratch/
|
||||||
@ -211,11 +215,6 @@ gfpgan/
|
|||||||
# config file (will be created by installer)
|
# config file (will be created by installer)
|
||||||
configs/models.yaml
|
configs/models.yaml
|
||||||
|
|
||||||
# weights (will be created by installer)
|
|
||||||
models/ldm/stable-diffusion-v1/*.ckpt
|
|
||||||
models/clipseg
|
|
||||||
models/gfpgan
|
|
||||||
|
|
||||||
# ignore initfile
|
# ignore initfile
|
||||||
.invokeai
|
.invokeai
|
||||||
|
|
||||||
@ -230,6 +229,3 @@ installer/install.bat
|
|||||||
installer/install.sh
|
installer/install.sh
|
||||||
installer/update.bat
|
installer/update.bat
|
||||||
installer/update.sh
|
installer/update.sh
|
||||||
|
|
||||||
# no longer stored in source directory
|
|
||||||
models
|
|
158
README.md
@ -1,6 +1,6 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
# InvokeAI: A Stable Diffusion Toolkit
|
# InvokeAI: A Stable Diffusion Toolkit
|
||||||
|
|
||||||
@ -10,10 +10,10 @@
|
|||||||
|
|
||||||
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
|
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
|
||||||
|
|
||||||
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
|
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
||||||
|
|
||||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
||||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||||
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||||
@ -28,12 +28,14 @@
|
|||||||
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
|
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
|
||||||
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||||
|
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
|
||||||
|
[translation status link]: https://hosted.weblate.org/engage/invokeai/
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
|
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
|
||||||
|
|
||||||
**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
||||||
|
|
||||||
_Note: InvokeAI is rapidly evolving. Please use the
|
_Note: InvokeAI is rapidly evolving. Please use the
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
||||||
@ -41,38 +43,136 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
|
|||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
# Getting Started with InvokeAI
|
## Table of Contents
|
||||||
|
|
||||||
|
1. [Quick Start](#getting-started-with-invokeai)
|
||||||
|
2. [Installation](#detailed-installation-instructions)
|
||||||
|
3. [Hardware Requirements](#hardware-requirements)
|
||||||
|
4. [Features](#features)
|
||||||
|
5. [Latest Changes](#latest-changes)
|
||||||
|
6. [Troubleshooting](#troubleshooting)
|
||||||
|
7. [Contributing](#contributing)
|
||||||
|
8. [Contributors](#contributors)
|
||||||
|
9. [Support](#support)
|
||||||
|
10. [Further Reading](#further-reading)
|
||||||
|
|
||||||
|
## Getting Started with InvokeAI
|
||||||
|
|
||||||
For full installation and upgrade instructions, please see:
|
For full installation and upgrade instructions, please see:
|
||||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
|
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
|
||||||
|
|
||||||
|
### Automatic Installer (suggested for 1st time users)
|
||||||
|
|
||||||
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
|
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
|
||||||
|
|
||||||
2. Download the .zip file for your OS (Windows/macOS/Linux).
|
2. Download the .zip file for your OS (Windows/macOS/Linux).
|
||||||
|
|
||||||
3. Unzip the file.
|
3. Unzip the file.
|
||||||
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
|
|
||||||
5. Wait a while, until it is done.
|
|
||||||
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
|
|
||||||
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
|
|
||||||
8. Type `banana sushi` in the box on the top left and click `Invoke`
|
|
||||||
|
|
||||||
|
4. If you are on Windows, double-click on the `install.bat` script. On
|
||||||
|
macOS, open a Terminal window, drag the file `install.sh` from Finder
|
||||||
|
into the Terminal, and press return. On Linux, run `install.sh`.
|
||||||
|
|
||||||
## Table of Contents
|
5. You'll be asked to confirm the location of the folder in which
|
||||||
|
to install InvokeAI and its image generation model files. Pick a
|
||||||
|
location with at least 15 GB of free memory. More if you plan on
|
||||||
|
installing lots of models.
|
||||||
|
|
||||||
1. [Installation](#installation)
|
6. Wait while the installer does its thing. After installing the software,
|
||||||
2. [Hardware Requirements](#hardware-requirements)
|
the installer will launch a script that lets you configure InvokeAI and
|
||||||
3. [Features](#features)
|
select a set of starting image generaiton models.
|
||||||
4. [Latest Changes](#latest-changes)
|
|
||||||
5. [Troubleshooting](#troubleshooting)
|
|
||||||
6. [Contributing](#contributing)
|
|
||||||
7. [Contributors](#contributors)
|
|
||||||
8. [Support](#support)
|
|
||||||
9. [Further Reading](#further-reading)
|
|
||||||
|
|
||||||
## Installation
|
7. Find the folder that InvokeAI was installed into (it is not the
|
||||||
|
same as the unpacked zip file directory!) The default location of this
|
||||||
|
folder (if you didn't change it in step 5) is `~/invokeai` on
|
||||||
|
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
|
||||||
|
|
||||||
|
8. On Windows systems, double-click on the `invoke.bat` file. On
|
||||||
|
macOS, open a Terminal window, drag `invoke.sh` from the folder into
|
||||||
|
the Terminal, and press return. On Linux, run `invoke.sh`
|
||||||
|
|
||||||
|
9. Press 2 to open the "browser-based UI", press enter/return, wait a
|
||||||
|
minute or two for Stable Diffusion to start up, then open your browser
|
||||||
|
and go to http://localhost:9090.
|
||||||
|
|
||||||
|
10. Type `banana sushi` in the box on the top left and click `Invoke`
|
||||||
|
|
||||||
|
### Command-Line Installation (for users familiar with Terminals)
|
||||||
|
|
||||||
|
You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
|
||||||
|
not supported.
|
||||||
|
|
||||||
|
1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
|
||||||
|
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
mkdir invokeai
|
||||||
|
````
|
||||||
|
|
||||||
|
3. Create a virtual environment named `.venv` inside this directory and activate it:
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
cd invokeai
|
||||||
|
python -m venv .venv --prompt InvokeAI
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Activate the virtual environment (do it every time you run InvokeAI)
|
||||||
|
|
||||||
|
_For Linux/Mac users:_
|
||||||
|
|
||||||
|
```sh
|
||||||
|
source .venv/bin/activate
|
||||||
|
```
|
||||||
|
|
||||||
|
_For Windows users:_
|
||||||
|
|
||||||
|
```ps
|
||||||
|
.venv\Scripts\activate
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
|
||||||
|
|
||||||
|
_For Windows/Linux with an NVIDIA GPU:_
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
|
```
|
||||||
|
|
||||||
|
_For Linux with an AMD GPU:_
|
||||||
|
|
||||||
|
```sh
|
||||||
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
|
```
|
||||||
|
|
||||||
|
_For Macintoshes, either Intel or M1/M2:_
|
||||||
|
|
||||||
|
```sh
|
||||||
|
pip install InvokeAI --use-pep517
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
invokeai-configure
|
||||||
|
```
|
||||||
|
|
||||||
|
7. Launch the web server (do it every time you run InvokeAI):
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
invokeai --web
|
||||||
|
```
|
||||||
|
|
||||||
|
8. Point your browser to http://localhost:9090 to bring up the web interface.
|
||||||
|
9. Type `banana sushi` in the box on the top left and click `Invoke`.
|
||||||
|
|
||||||
|
Be sure to activate the virtual environment each time before re-launching InvokeAI,
|
||||||
|
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
|
||||||
|
|
||||||
|
### Detailed Installation Instructions
|
||||||
|
|
||||||
This fork is supported across Linux, Windows and Macintosh. Linux
|
This fork is supported across Linux, Windows and Macintosh. Linux
|
||||||
users can use either an Nvidia-based card (with CUDA support) or an
|
users can use either an Nvidia-based card (with CUDA support) or an
|
||||||
@ -80,13 +180,13 @@ AMD card (using the ROCm driver). For full installation and upgrade
|
|||||||
instructions, please see:
|
instructions, please see:
|
||||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
|
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
|
||||||
|
|
||||||
### Hardware Requirements
|
## Hardware Requirements
|
||||||
|
|
||||||
InvokeAI is supported across Linux, Windows and macOS. Linux
|
InvokeAI is supported across Linux, Windows and macOS. Linux
|
||||||
users can use either an Nvidia-based card (with CUDA support) or an
|
users can use either an Nvidia-based card (with CUDA support) or an
|
||||||
AMD card (using the ROCm driver).
|
AMD card (using the ROCm driver).
|
||||||
|
|
||||||
#### System
|
### System
|
||||||
|
|
||||||
You will need one of the following:
|
You will need one of the following:
|
||||||
|
|
||||||
@ -98,11 +198,11 @@ We do not recommend the GTX 1650 or 1660 series video cards. They are
|
|||||||
unable to run in half-precision mode and do not have sufficient VRAM
|
unable to run in half-precision mode and do not have sufficient VRAM
|
||||||
to render 512x512 images.
|
to render 512x512 images.
|
||||||
|
|
||||||
#### Memory
|
### Memory
|
||||||
|
|
||||||
- At least 12 GB Main Memory RAM.
|
- At least 12 GB Main Memory RAM.
|
||||||
|
|
||||||
#### Disk
|
### Disk
|
||||||
|
|
||||||
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||||
|
|
||||||
@ -152,13 +252,15 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
|
|||||||
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
|
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
|
||||||
problems and other issues.
|
problems and other issues.
|
||||||
|
|
||||||
# Contributing
|
## Contributing
|
||||||
|
|
||||||
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
||||||
cleanup, testing, or code reviews, is very much encouraged to do so.
|
cleanup, testing, or code reviews, is very much encouraged to do so.
|
||||||
|
|
||||||
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
|
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
|
||||||
|
|
||||||
|
If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).
|
||||||
|
|
||||||
If you are unfamiliar with how
|
If you are unfamiliar with how
|
||||||
to contribute to GitHub projects, here is a
|
to contribute to GitHub projects, here is a
|
||||||
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
|
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
|
||||||
@ -175,6 +277,8 @@ This fork is a combined effort of various people from across the world.
|
|||||||
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
|
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
|
||||||
their time, hard work and effort.
|
their time, hard work and effort.
|
||||||
|
|
||||||
|
Thanks to [Weblate](https://weblate.org/) for generously providing translation services to this project.
|
||||||
|
|
||||||
### Support
|
### Support
|
||||||
|
|
||||||
For support, please use this repository's GitHub Issues tracking service, or join the Discord.
|
For support, please use this repository's GitHub Issues tracking service, or join the Discord.
|
||||||
|
@ -147,7 +147,7 @@ echo ***** Installed invoke launcher script ******
|
|||||||
rd /s /q binary_installer installer_files
|
rd /s /q binary_installer installer_files
|
||||||
|
|
||||||
@rem preload the models
|
@rem preload the models
|
||||||
call .venv\Scripts\python scripts\configure_invokeai.py
|
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
|
||||||
set err_msg=----- model download clone failed -----
|
set err_msg=----- model download clone failed -----
|
||||||
if %errorlevel% neq 0 goto err_exit
|
if %errorlevel% neq 0 goto err_exit
|
||||||
deactivate
|
deactivate
|
||||||
|
4
coverage/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Ignore everything in this directory
|
||||||
|
*
|
||||||
|
# Except this file
|
||||||
|
!.gitignore
|
@ -1,71 +1,80 @@
|
|||||||
# syntax=docker/dockerfile:1
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
ARG PYTHON_VERSION=3.9
|
ARG PYTHON_VERSION=3.9
|
||||||
##################
|
##################
|
||||||
## base image ##
|
## base image ##
|
||||||
##################
|
##################
-FROM python:${PYTHON_VERSION}-slim AS python-base
+FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base

-# prepare for buildkit cache
-RUN rm -f /etc/apt/apt.conf.d/docker-clean
+LABEL org.opencontainers.image.authors="mauwii@outlook.de"

-# Install necesarry packages
+# Prepare apt for buildkit cache
+RUN rm -f /etc/apt/apt.conf.d/docker-clean \
+    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache

+# Install dependencies
 RUN \
     --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
     apt-get update \
-    && apt-get install \
-    -yqq \
+    && apt-get install -y \
     --no-install-recommends \
     libgl1-mesa-glx=20.3.* \
     libglib2.0-0=2.66.* \
-    libopencv-dev=4.5.* \
-    && rm -rf /var/lib/apt/lists/*
+    libopencv-dev=4.5.*

-# set working directory and path
+# Set working directory and env
 ARG APPDIR=/usr/src
 ARG APPNAME=InvokeAI
 WORKDIR ${APPDIR}
-ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
+ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
+# Keeps Python from generating .pyc files in the container
+ENV PYTHONDONTWRITEBYTECODE 1
+# Turns off buffering for easier container logging
+ENV PYTHONUNBUFFERED 1
+# Don't fall back to legacy build system
+ENV PIP_USE_PEP517=1

 #######################
 ## build pyproject   ##
 #######################
 FROM python-base AS pyproject-builder
-ENV PIP_USE_PEP517=1

-# prepare for buildkit cache
+# Install build dependencies
+RUN \
+    --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt-get update \
+    && apt-get install -y \
+    --no-install-recommends \
+    build-essential=12.9 \
+    gcc=4:10.2.* \
+    python3-dev=3.9.*

+# Prepare pip for buildkit cache
 ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
 ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
 RUN mkdir -p ${PIP_CACHE_DIR}

-# Install dependencies
-RUN \
-    --mount=type=cache,target=${PIP_CACHE_DIR} \
-    --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt-get update \
-    && apt-get install \
-    -yqq \
-    --no-install-recommends \
-    build-essential=12.9 \
-    gcc=4:10.2.* \
-    python3-dev=3.9.* \
-    && rm -rf /var/lib/apt/lists/*

-# create virtual environment
+# Create virtual environment
 RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
     python3 -m venv "${APPNAME}" \
     --upgrade-deps

-# copy sources
-COPY --link . .
+# Install requirements
+COPY --link pyproject.toml .
+COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/

-# install pyproject.toml
 ARG PIP_EXTRA_INDEX_URL
 ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
-ARG PIP_PACKAGE=.
 RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
-    "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
+    "${APPNAME}"/bin/pip install .

-# build patchmatch
+# Install pyproject.toml
+COPY --link . .
+RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
+    "${APPNAME}/bin/pip" install .

+# Build patchmatch
 RUN python3 -c "from patchmatch import patch_match"

 #####################
@@ -73,14 +82,26 @@ RUN python3 -c "from patchmatch import patch_match"
 #####################
 FROM python-base AS runtime

-# setup environment
-COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
-ENV INVOKEAI_ROOT=/data
-ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
+# Create a new user
+ARG UNAME=appuser
+RUN useradd \
+    --no-log-init \
+    -m \
+    -U \
+    "${UNAME}"

-# set Entrypoint and default CMD
+# Create volume directory
+ARG VOLUME_DIR=/data
+RUN mkdir -p "${VOLUME_DIR}" \
+    && chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"

+# Setup runtime environment
+USER ${UNAME}:${UNAME}
+COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
+ENV INVOKEAI_ROOT ${VOLUME_DIR}
+ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
+ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
+EXPOSE 9090
 ENTRYPOINT [ "invokeai" ]
-CMD [ "--web", "--host=0.0.0.0" ]
-VOLUME [ "/data" ]
+CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
+VOLUME [ "${VOLUME_DIR}" ]

-LABEL org.opencontainers.image.authors="mauwii@outlook.de"
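With the `ENTRYPOINT`/`CMD` pair above, the container starts `invokeai --web --host 0.0.0.0 --port 9090` by default, and any arguments appended to `docker run` replace only the `CMD` portion. A minimal sketch of exercising the image by hand, assuming the Dockerfile lives in `docker/`, accepts the `PYTHON_VERSION` build argument its `FROM` line suggests, and is tagged `invokeai` locally (all names here are illustrative, not fixed by the Dockerfile):

```bash
# Build the image from the repository root, then start the web UI on port 9090,
# persisting /data in a named volume so models survive container restarts
docker build --build-arg PYTHON_VERSION=3.10 --tag invokeai --file docker/Dockerfile .
docker run --rm --publish 9090:9090 --volume invokeai_data:/data invokeai
```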
@@ -1,19 +1,24 @@
 #!/usr/bin/env bash
 set -e

-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
-# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
-# CUDA 11.6: https://download.pytorch.org/whl/cu116
-# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
-# CPU: https://download.pytorch.org/whl/cpu
-# as found on https://pytorch.org/get-started/locally/
+# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
+# e.g. CONTAINER_FLAVOR=cpu ./build.sh
+# Possible Values are:
+# - cpu
+# - cuda
+# - rocm
+# Don't forget to also set it when executing run.sh
+# if it is not set, the script will try to detect the flavor by itself.
+#
+# Doc can be found here:
+# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

 source ./env.sh

-DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
+DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}

 # print the settings
 echo -e "You are using these values:\n"
@@ -21,23 +26,25 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
 echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
 echo -e "Volumename:\t\t${VOLUMENAME}"
 echo -e "Platform:\t\t${PLATFORM}"
-echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
-echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
+echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
+echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
 echo -e "Container Tag:\t\t${CONTAINER_TAG}"
+echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
 echo -e "Container Image:\t${CONTAINER_IMAGE}\n"

 # Create docker volume
 if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
     echo -e "Volume already exists\n"
 else
-    echo -n "createing docker volume "
+    echo -n "creating docker volume "
     docker volume create "${VOLUMENAME}"
 fi

 # Build Container
-DOCKER_BUILDKIT=1 docker build \
-    --platform="${PLATFORM}" \
-    --tag="${CONTAINER_IMAGE}" \
+docker build \
+    --platform="${PLATFORM:-linux/amd64}" \
+    --tag="${CONTAINER_IMAGE:-invokeai}" \
+    ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
     ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
     ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
     --file="${DOCKERFILE}" \
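Since `CONTAINER_FLAVOR` now drives both the image tag and the build arguments, a typical invocation looks like the script's own header comment describes (run from the directory containing `build.sh`):

```bash
# Build the CPU flavor explicitly instead of relying on auto-detection
CONTAINER_FLAVOR=cpu ./build.sh
```

The same variable should be set again when executing `run.sh`, as the comment block at the top of the script notes.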
@@ -1,19 +1,31 @@
 #!/usr/bin/env bash

+# This file is used to set environment variables for the build.sh and run.sh scripts.

+# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
 if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then

+    # Activate virtual environment if not already activated and exists
+    if [[ -z $VIRTUAL_ENV ]]; then
+        [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
+            && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
+            && echo "Activated virtual environment: $VIRTUAL_ENV"
+    fi

     # Decide which container flavor to build if not specified
     if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
         # Check for CUDA and ROCm
         CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
         ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
-        if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
+        if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
             CONTAINER_FLAVOR="cuda"
-        elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
+        elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
             CONTAINER_FLAVOR="rocm"
         else
             CONTAINER_FLAVOR="cpu"
         fi
     fi

     # Set PIP_EXTRA_INDEX_URL based on container flavor
     if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
         PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
@@ -26,9 +38,10 @@ fi

 # Variables shared by build.sh and run.sh
 REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
-VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
+REPOSITORY_NAME="${REPOSITORY_NAME,,}"
+VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
 ARCH="${ARCH-$(uname -m)}"
-PLATFORM="${PLATFORM-Linux/${ARCH}}"
+PLATFORM="${PLATFORM-linux/${ARCH}}"
 INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
 CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
 CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
@@ -36,3 +49,6 @@ CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
 CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
 CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
 CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"

+# enable docker buildkit
+export DOCKER_BUILDKIT=1
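Because every variable in `env.sh` uses the `${VAR-default}` pattern, any of them can be overridden from the calling shell. A sketch with illustrative values:

```bash
# Override the registry and repository for a push to Docker Hub,
# keeping everything else auto-detected by env.sh
CONTAINER_REGISTRY=docker.io CONTAINER_REPOSITORY=myuser/invokeai ./build.sh
```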
@@ -1,14 +1,16 @@
 #!/usr/bin/env bash
 set -e

-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
-# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

 source ./env.sh

+# Create outputs directory if it does not exist
+[[ -d ./outputs ]] || mkdir ./outputs

 echo -e "You are using these values:\n"
 echo -e "Volumename:\t${VOLUMENAME}"
 echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
@@ -19,13 +21,21 @@ docker run \
     --tty \
     --rm \
     --platform="${PLATFORM}" \
-    --name="${REPOSITORY_NAME,,}" \
-    --hostname="${REPOSITORY_NAME,,}" \
-    --mount=source="${VOLUMENAME}",target=/data \
-    ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
+    --name="${REPOSITORY_NAME}" \
+    --hostname="${REPOSITORY_NAME}" \
+    --mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
+    --mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
     ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
     ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
     --publish=9090:9090 \
     --cap-add=sys_nice \
     ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
-    "${CONTAINER_IMAGE}" ${1:+$@}
+    "${CONTAINER_IMAGE}" ${@:+$@}

+echo -e "\nCleaning trash folder ..."
+for f in outputs/.Trash*; do
+    if [ -e "$f" ]; then
+        rm -Rf "$f"
+        break
+    fi
+done
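A typical invocation of `run.sh` (a sketch; the path and values are illustrative) would be:

```bash
# Bind-mount an existing model folder and expose all GPUs to the container
MODELSPATH=~/stable-diffusion/models GPU_FLAGS=all ./run.sh
```

Thanks to the `${VAR:+...}` expansions above, the models bind mount, the HuggingFace token, and the `--gpus` flag are simply omitted when the corresponding variables are unset.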
BIN  docs/assets/contributing/html-detail.png  (new file, 470 KiB)
BIN  docs/assets/contributing/html-overview.png  (new file, 457 KiB)
BIN  (modified image: 20 KiB before, 84 KiB after)
BIN  docs/assets/installer-walkthrough/installing-models.png  (new file, 128 KiB)
BIN  docs/assets/installer-walkthrough/settings-form.png  (new file, 114 KiB)
docs/contributing/ARCHITECTURE.md  (new file, 93 lines)
@@ -0,0 +1,93 @@

# Invoke.AI Architecture

```mermaid
flowchart TB

    subgraph apps[Applications]
        webui[WebUI]
        cli[CLI]

        subgraph webapi[Web API]
            api[HTTP API]
            sio[Socket.IO]
        end

    end

    subgraph invoke[Invoke]
        direction LR
        invoker
        services
        sessions
        invocations
    end

    subgraph core[AI Core]
        Generate
    end

    webui --> webapi
    webapi --> invoke
    cli --> invoke

    invoker --> services & sessions
    invocations --> services
    sessions --> invocations

    services --> core

    %% Styles
    classDef sg fill:#5028C8,font-weight:bold,stroke-width:2,color:#fff,stroke:#14141A
    classDef default stroke-width:2px,stroke:#F6B314,color:#fff,fill:#14141A

    class apps,webapi,invoke,core sg
```

## Applications

Applications are built on top of the invoke framework. They should construct `invoker` and then interact through it. They should avoid interacting directly with core code in order to support a variety of configurations.

### Web UI

The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:

| Component | Description |
| --- | --- |
| api_app.py | Sets up the API app, annotates the OpenAPI spec with additional data, and runs the API |
| dependencies | Creates all invoker services and the invoker, and provides them to the API |
| events | An eventing system that could in the future be adapted to support horizontal scale-out |
| sockets | The Socket.IO interface - handles listening to and emitting session events (events are defined in the events service module) |
| routers | API definitions for different areas of API functionality |

### CLI

The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.

## Invoke

The Invoke framework provides the interface to the underlying AI systems and is built with flexibility and extensibility in mind. There are four major concepts: invoker, sessions, invocations, and services.

### Invoker

The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:

- **invocation services**, which are used by invocations to interact with core functionality.
- **invoker services**, which are used by the invoker to manage sessions and the invocation queue.

### Sessions

Invocations and links between them form a graph, which is maintained in a session. Sessions can be queued for invocation, which will execute their graph (either the next ready invocation, or all invocations). Sessions also maintain execution history for the graph (including storage of any outputs). An invocation may be added to a session at any time, and there is capability to add an entire graph at once, as well as to automatically link new invocations to previous invocations. Invocations can not be deleted or modified once added.

The session graph does not support looping. This is left as an application problem to prevent additional complexity in the graph.

### Invocations

Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.

### Services

Services provide invocations access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).

## AI Core

The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
docs/contributing/INVOCATIONS.md  (new file, 105 lines)
@@ -0,0 +1,105 @@

# Invocations

Invocations represent a single operation, its inputs, and its outputs. These operations and their outputs can be chained together to generate and modify images.

## Creating a new invocation

To create a new invocation, either find the appropriate module file in `/ldm/invoke/app/invocations` to add your invocation to, or create a new one in that folder. All invocations in that folder will be discovered and made available to the CLI and API automatically. Invocations make use of [typing](https://docs.python.org/3/library/typing.html) and [pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration into the CLI and API.

An invocation looks like this:

```py
class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""
    type: Literal['upscale'] = 'upscale'

    # Inputs
    image: Union[ImageField,None] = Field(description="The input image")
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
    level: Literal[2,4] = Field(default=2, description="The upscale level")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(self.image.image_type, self.image.image_name)
        results = context.services.generate.upscale_and_reconstruct(
            image_list = [[image, 0]],
            upscale = (self.level, self.strength),
            strength = 0.0, # GFPGAN strength
            save_original = False,
            image_callback = None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
        context.services.images.save(image_type, image_name, results[0][0])
        return ImageOutput(
            image = ImageField(image_type = image_type, image_name = image_name)
        )
```

Each portion is important to implement correctly.

### Class definition and type

```py
class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""
    type: Literal['upscale'] = 'upscale'
```

All invocations must derive from `BaseInvocation`. They should have a docstring that declares what they do in a single, short line. They should also have a `type` with a type hint that's `Literal["command_name"]`, where `command_name` is what the user will type on the CLI or use in the API to create this invocation. The `command_name` must be unique. The `type` must be assigned to the value of the literal in the type hint.

### Inputs

```py
    # Inputs
    image: Union[ImageField,None] = Field(description="The input image")
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
    level: Literal[2,4] = Field(default=2, description="The upscale level")
```

Inputs consist of three parts: a name, a type hint, and a `Field` with default, description, and validation information. For example:

| Part | Value | Description |
| ---- | ----- | ----------- |
| Name | `strength` | This field is referred to as `strength` |
| Type Hint | `float` | This field must be of type `float` |
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |

Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this field to be parsed with `None` as a value, which enables linking to previous invocations. All fields should either provide a default value or allow `None` as a value, so that they can be overwritten with a linked output from another invocation.

The special type `ImageField` is also used here. All images are passed as `ImageField`, which protects them from pydantic validation errors (since images only ever come from links).

Finally, note that for all linking, the `type` of the linked fields must match. If the `name` also matches, then the field can be **automatically linked** to a previous invocation by name and matching.

### Invoke Function

```py
    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(self.image.image_type, self.image.image_name)
        results = context.services.generate.upscale_and_reconstruct(
            image_list = [[image, 0]],
            upscale = (self.level, self.strength),
            strength = 0.0, # GFPGAN strength
            save_original = False,
            image_callback = None,
        )

        # Results are image and seed, unwrap for now
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
        context.services.images.save(image_type, image_name, results[0][0])
        return ImageOutput(
            image = ImageField(image_type = image_type, image_name = image_name)
        )
```

The `invoke` function is the last portion of an invocation. It is provided an `InvocationContext` which contains services to perform work as well as a `session_id` for use as needed. It should return a class with output values that derives from `BaseInvocationOutput`.

Before being called, the invocation will have all of its fields set from defaults, inputs, and finally links (overriding in that order).

Assume that this invocation may be running simultaneously with other invocations, may be running on another machine, or in other interesting scenarios. If you need functionality, please provide it as a service in the `InvocationServices` class, and make sure it can be overridden.

### Outputs

```py
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""
    type: Literal['image'] = 'image'

    image: ImageField = Field(default=None, description="The output image")
```

Output classes look like an invocation class without the invoke method. Prefer to use an existing output class if available, and prefer to name inputs the same as outputs when possible, to promote automatic invocation linking.
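To see how the `type` literal surfaces in practice, here is a hypothetical CLI session (the prompt and flag syntax are illustrative rather than taken from the codebase; `upscale` is the invocation defined above and `--level`/`--strength` are its pydantic fields):

```bash
# Hypothetical: call the upscale invocation from the auto-generated CLI,
# letting its `image` input be auto-linked from the previous invocation's output
invoke> upscale --level 4 --strength 0.9
```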
docs/contributing/LOCAL_DEVELOPMENT.md  (new file, 83 lines)
@@ -0,0 +1,83 @@

# Local Development

If you are looking to contribute you will need to have a local development
environment. See the
[Developer Install](../installation/020_INSTALL_MANUAL.md#developer-install) for
full details.

Broadly this involves cloning the repository, installing the pre-reqs, and
InvokeAI (in editable form). Assuming this is working, choose your area of
focus.

## Documentation

We use [mkdocs](https://www.mkdocs.org) for our documentation with the
[material theme](https://squidfunk.github.io/mkdocs-material/). Documentation is
written in markdown files under the `./docs` folder and then built into a static
website for hosting with GitHub Pages at
[invoke-ai.github.io/InvokeAI](https://invoke-ai.github.io/InvokeAI).

To contribute to the documentation you'll need to install the dependencies. Note
the use of `"`.

```zsh
pip install ".[docs]"
```

Now run the documentation locally, with hot-reloading of changes:

```zsh
mkdocs serve
```

You'll then be prompted to connect to `http://127.0.0.1:8080` in order to
access the documentation.

## Backend

The backend is contained within the `./invokeai/backend` folder structure. To
get started, first install the development dependencies.

From the root of the repository run the following command. Note the use of `"`.

```zsh
pip install ".[test]"
```

This is an optional group of packages which is defined within the
`pyproject.toml` and will be required for testing the changes you make to the
code.

### Running Tests

We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests can
be found under the `./tests` folder and can be run with a single `pytest`
command. Optionally, to review test coverage you can append `--cov`.

```zsh
pytest --cov
```

Test outcomes and coverage will be reported in the terminal. In addition a more
detailed report is created in both XML and HTML format in the `./coverage`
folder. The HTML one in particular can help identify missing statements
requiring tests to ensure coverage. It can be viewed by opening
`./coverage/html/index.html`.

For example:

```zsh
pytest --cov; open ./coverage/html/index.html
```

??? info "HTML coverage report output"

    ![html-overview](../assets/contributing/html-overview.png)

    ![html-detail](../assets/contributing/html-detail.png)

## Front End

<!--#TODO: get input from blessedcoolant here, for the moment inserted the frontend README via snippets extension.-->

--8<-- "invokeai/frontend/web/README.md"
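Before opening a pull request against the docs, it can also help to do a strict static build, which fails on broken links and configuration warnings (a sketch using stock mkdocs flags):

```bash
# Build the site into ./site, treating warnings as errors
mkdocs build --strict
```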
@@ -214,6 +214,8 @@ Here are the invoke> command that apply to txt2img:
 | `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). |
 | `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
 | `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
+| `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
+| `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |

 !!! note
@@ -168,11 +168,15 @@ used by Stable Diffusion 1.4 and 1.5.
 After installation, your `models.yaml` should contain an entry that looks like
 this one:

-inpainting-1.5: weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
-description: SD inpainting v1.5 config:
-configs/stable-diffusion/v1-inpainting-inference.yaml vae:
-models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt width: 512
-height: 512
+```yml
+inpainting-1.5:
+  weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
+  description: SD inpainting v1.5
+  config: configs/stable-diffusion/v1-inpainting-inference.yaml
+  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
+  width: 512
+  height: 512
+```

 As shown in the example, you may include a VAE fine-tuning weights file as well.
 This is strongly recommended.
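Once an entry like this is in place, the model can be selected by name at runtime; a minimal sketch, assuming the 2.3-series CLI's model-switching command:

```bash
# Switch the running CLI session to the model named in models.yaml
invoke> !switch inpainting-1.5
```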
@@ -40,7 +40,7 @@ for adj in adjectives:
     print(f'a {adj} day -A{samp} -C{cg}')
 ```

-It's output looks like this (abbreviated):
+Its output looks like this (abbreviated):

 ```bash
 a sunny day -Aklms -C7.5
@@ -17,7 +17,7 @@ notebooks.
 You will need a GPU to perform training in a reasonable length of
 time, and at least 12 GB of VRAM. We recommend using the [`xformers`
-library](../installation/070_INSTALL_XFORMERS) to accelerate the
+library](../installation/070_INSTALL_XFORMERS.md) to accelerate the
 training process further. During training, about ~8 GB is temporarily
 needed in order to store intermediate models, checkpoints and logs.
@@ -250,6 +250,24 @@ invokeai-ti \
 --only_save_embeds
 ```

+## Using Embeddings
+
+After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.
+
+These will be automatically loaded when you start InvokeAI.
+
+Add the trigger word, surrounded by angle brackets, to use that embedding. For example, if your trigger word was `terence`, use `<terence>` in prompts. This is the same syntax used by the HuggingFace concepts library.
+
+**Note:** `.pt` embeddings do not require the angle brackets.
+
+## Troubleshooting
+
+### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`
+
+Messages like this indicate you trained the embedding on a different base model than the currently selected one.
+
+For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).
+
 ## Reading

 For more information on textual inversion, please see the following
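For instance, with the `terence` trigger word from the new section above, a prompt would look like this (illustrative only):

```bash
a portrait of <terence> in the style of a renaissance painting
```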
@@ -1,19 +0,0 @@
-<!-- HTML for static distribution bundle build -->
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="UTF-8">
-    <title>Swagger UI</title>
-    <link rel="stylesheet" type="text/css" href="swagger-ui/swagger-ui.css" />
-    <link rel="stylesheet" type="text/css" href="swagger-ui/index.css" />
-    <link rel="icon" type="image/png" href="swagger-ui/favicon-32x32.png" sizes="32x32" />
-    <link rel="icon" type="image/png" href="swagger-ui/favicon-16x16.png" sizes="16x16" />
-  </head>
-
-  <body>
-    <div id="swagger-ui"></div>
-    <script src="swagger-ui/swagger-ui-bundle.js" charset="UTF-8"> </script>
-    <script src="swagger-ui/swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
-    <script src="swagger-ui/swagger-initializer.js" charset="UTF-8"> </script>
-  </body>
-</html>
@@ -40,9 +40,10 @@ experimental versions later.
 this, open up a command-line window ("Terminal" on Linux and
 Macintosh, "Command" or "Powershell" on Windows) and type `python
 --version`. If Python is installed, it will print out the version
-number. If it is version `3.9.1` or `3.10.x`, you meet
-requirements.
+number. If it is version `3.9.*` or `3.10.*`, you meet
+requirements. We do not recommend using Python 3.11 or higher,
+as not all the libraries that InvokeAI depends on work properly
+with this version.

 !!! warning "What to do if you have an unsupported version"
@@ -50,8 +51,7 @@ experimental versions later.
 and download the appropriate installer package for your
 platform. We recommend [Version
 3.10.9](https://www.python.org/downloads/release/python-3109/),
-which has been extensively tested with InvokeAI. At this time
-we do not recommend Python 3.11.
+which has been extensively tested with InvokeAI.

 _Please select your platform in the section below for platform-specific
 setup requirements._
@@ -150,7 +150,7 @@ experimental versions later.
 ```cmd
 C:\Documents\Linco> cd InvokeAI-Installer
-C:\Documents\Linco\invokeAI> install.bat
+C:\Documents\Linco\invokeAI> .\install.bat
 ```

 7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a
@@ -167,6 +167,11 @@ experimental versions later.
 `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
 on Macintoshes, where "YourName" is your login name.

+    - If you have previously installed InvokeAI, you will be asked to
+      confirm whether you want to reinstall into this directory. You
+      may choose to reinstall, in which case your version will be upgraded,
+      or choose a different directory.
+
 - The script uses tab autocompletion to suggest directory path completions.
   Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
   to suggest completions.
@@ -181,11 +186,6 @@ experimental versions later.
 are unsure what GPU you are using, you can ask the installer to
 guess.
-
-<figure markdown>
-![ask-select-gpu](../assets/installer-walkthrough/choose-gpu.png)
-</figure>
-
 9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
 libraries needed by InvokeAI and the application itself.
@@ -197,25 +197,141 @@ experimental versions later.
 minutes and nothing is happening, you can interrupt the script with ^C. You
 may restart it and it will pick up where it left off.

-10. **Post-install Configuration**: After installation completes, the installer will launch the
-    configuration script, which will guide you through the first-time
-    process of selecting one or more Stable Diffusion model weights
-    files, downloading and configuring them. We provide a list of
-    popular models that InvokeAI performs well with. However, you can
-    add more weight files later on using the command-line client or
-    the Web UI. See [Installing Models](050_INSTALLING_MODELS.md) for
-    details.

 <figure markdown>
 ![settings-form](../assets/installer-walkthrough/settings-form.png)
 </figure>

-If you have already downloaded the weights file(s) for another Stable
-Diffusion distribution, you may skip this step (by selecting "skip" when
-prompted) and configure InvokeAI to use the previously-downloaded files. The
-process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
-
-11. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
+10. **Post-install Configuration**: After installation completes, the
+    installer will launch the configuration form, which will guide you
+    through the first-time process of adjusting some of InvokeAI's
+    startup settings. To move around this form use ctrl-N for
+    <N>ext and ctrl-P for <P>revious, or use <tab>
+    and shift-<tab> to move forward and back. Once you are in a
+    multi-checkbox field use the up and down cursor keys to select the
+    item you want, and <space> to toggle it on and off. Within
+    a directory field, pressing <tab> will provide autocomplete
+    options.
+
+    Generally the defaults are fine, and you can come back to this screen at
+    any time to tweak your system. Here are the options you can adjust:
+
+    - ***Output directory for images***
+      This is the path to a directory in which InvokeAI will store all its
+      generated images.
+
+    - ***NSFW checker***
+      If checked, InvokeAI will test images for potential sexual content
+      and blur them out if found. Note that the NSFW checker consumes
+      an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
+      by most image models. If you have a low VRAM GPU (4-6 GB), you
+      can reduce out of memory errors by disabling the checker.
+
+    - ***HuggingFace Access Token***
+      InvokeAI has the ability to download embedded styles and subjects
+      from the HuggingFace Concept Library on-demand. However, some of
+      the concept library files are password protected. To make download
+      smoother, you can set up an account at huggingface.co, obtain an
+      access token, and paste it into this field. Note that you paste
+      to this screen using ctrl-shift-V.
+
+    - ***Free GPU memory after each generation***
+      This is useful for low-memory machines and helps minimize the
+      amount of GPU VRAM used by InvokeAI.
+
+    - ***Enable xformers support if available***
+      If the xformers library was successfully installed, this will activate
+      it to reduce memory consumption and increase rendering speed noticeably.
+      Note that xformers has the side effect of generating slightly different
+      images even when presented with the same seed and other settings.
+
+    - ***Force CPU to be used on GPU systems***
+      This will use the (slow) CPU rather than the accelerated GPU. This
+      can be used to generate images on systems that don't have a compatible
+      GPU.
+
+    - ***Precision***
+      This controls whether to use float32 or float16 arithmetic.
+      float16 uses less memory but is also slightly less accurate.
+      Ordinarily the right arithmetic is picked automatically ("auto"),
+      but you may have to use float32 to get images on certain systems
+      and graphics cards. The "autocast" option is deprecated and
+      shouldn't be used unless you are asked to by a member of the team.
+
+    - ***Number of models to cache in CPU memory***
+      This allows you to keep models in memory and switch rapidly among
+      them rather than having them load from disk each time. This slider
+      controls how many models to keep loaded at once. Each
+      model will use 2-4 GB of RAM, so use this cautiously.
+
+    - ***Directory containing embedding/textual inversion files***
+      This is the directory in which you can place custom embedding
+      files (.pt or .bin). During startup, this directory will be
+      scanned and InvokeAI will print out the text terms that
+      are available to trigger the embeddings.
+
+    At the bottom of the screen you will see a checkbox for accepting
+    the CreativeML Responsible AI License. You need to accept the license
+    in order to download Stable Diffusion models from the next screen.
+
+    _You can come back to the startup options form_ as many times as you like.
+    From the `invoke.sh` or `invoke.bat` launcher, select option (6) to relaunch
+    this script. On the command line, it is named `invokeai-configure`.
+
+11. **Downloading Models**: After you press `[NEXT]` on the screen, you will be taken
+    to another screen that prompts you to download a series of starter models. The ones
+    we recommend are preselected for you, but you are encouraged to use the checkboxes to
+    pick and choose.
+    You will probably wish to download `autoencoder-840000` for use with models that
+    were trained with an older version of the Stability VAE.
+
+    <figure markdown>
+    ![installing-models-screenshot](../assets/installer-walkthrough/installing-models.png)
+    </figure>
+
+    Below the preselected list of starter models is a large text field which you can use
+    to specify a series of models to import. You can specify models in a variety of formats,
+    each separated by a space or newline. The formats accepted are:
+
+    - The path to a .ckpt or .safetensors file. On most systems, you can drag a file from
+      the file browser to the textfield to automatically paste the path. Be sure to remove
+      extraneous quotation marks and other things that come along for the ride.
+
+    - The path to a directory containing a combination of `.ckpt` and `.safetensors` files.
+      The directory will be scanned from top to bottom (including subfolders) and any
+      file that can be imported will be.
+
+    - A URL pointing to a `.ckpt` or `.safetensors` file. You can cut
+      and paste directly from a web page, or simply drag the link from the web page
+      or navigation bar. (You can also use ctrl-shift-V to paste into this field)
+      The file will be downloaded and installed.
+
+    - The HuggingFace repository ID (repo_id) for a `diffusers` model. These IDs have
+      the format _author_name/model_name_, as in `andite/anything-v4.0`
+
+    - The path to a local directory containing a `diffusers`
+      model. These directories always have the file `model_index.json`
+      at their top level.
+
+    _Select a directory for models to import_ You may select a local
+    directory for autoimporting at startup time. If you select this
+    option, the directory you choose will be scanned for new
+    .ckpt/.safetensors files each time InvokeAI starts up, and any new
+    files will be automatically imported and made available for your
+    use.
+
+    _Convert imported models into diffusers_ When legacy checkpoint
+    files are imported, you may select to use them unmodified (the
+    default) or to convert them into `diffusers` models. The latter
+    load much faster and have slightly better rendering performance,
+    but not all checkpoint files can be converted. Note that Stable Diffusion
+    Version 2.X files are **only** supported in `diffusers` format and will
+    be converted regardless.
+
+    _You can come back to the model install form_ as many times as you like.
+    From the `invoke.sh` or `invoke.bat` launcher, select option (5) to relaunch
+    this script. On the command line, it is named `invokeai-model-install`.
+
+12. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
 for the directory `invokeai` installed in the location you chose at the
 beginning of the install session. Look for a shell script named `invoke.sh`
 (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
@@ -301,7 +417,7 @@ Then type the following commands:

 === "AMD System"
     ```bash
-    pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
+    pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
     ```

 ### Corrupted configuration file
@@ -327,6 +443,52 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
 visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
 assistance.

+### Out of Memory Issues
+
+The models are large, VRAM is expensive, and you may find yourself
+faced with Out of Memory errors when generating images. Here are some
+tips to reduce the problem:
+
+* **4 GB of VRAM**
+
+  This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
+  and derived models, provided that you **disable** the NSFW checker. To
+  disable the filter, do one of the following:
+
+  * Select option (6) "_change InvokeAI startup options_" from the
+    launcher. This will bring up the console-based startup settings
+    dialogue and allow you to unselect the "NSFW Checker" option.
+  * Start the startup settings dialogue directly by running
+    `invokeai-configure --skip-sd-weights --skip-support-models`
+    from the command line.
+  * Find the `invokeai.init` initialization file in the InvokeAI root
+    directory, open it in a text editor, and change `--nsfw_checker`
+    to `--no-nsfw_checker`
+
+  If you are on a CUDA system, you can realize significant memory
+  savings by activating the `xformers` library as described above. The
+  downside is `xformers` introduces non-deterministic behavior, such
+  that images generated with exactly the same prompt and settings will
+  be slightly different from each other. See above for more information.
+
+* **6 GB of VRAM**
+
+  This is a border case. Using the SD 1.5 series you should be able to
+  generate images up to 640x640 with the NSFW checker enabled, and up to
+  1024x1024 with it disabled and `xformers` activated.
+
+  If you run into persistent memory issues there are a series of
+  environment variables that you can set before launching InvokeAI that
+  alter how the PyTorch machine learning library manages memory. See
+  https://pytorch.org/docs/stable/notes/cuda.html#memory-management for
+  a list of these tweaks.
+
+* **12 GB of VRAM**
+
+  This should be sufficient to generate larger images up to about
+  1280x1280. If you wish to push further, consider activating
+  `xformers`.
+
 ### Other Problems

 If you run into problems during or after installation, the InvokeAI team is
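As a concrete instance of the PyTorch memory-management tweaks referenced in the new section above, the usual starting point is the CUDA caching-allocator configuration variable documented on that page; a sketch (the value is workload-dependent, not a recommendation):

```bash
# Reduce fragmentation-related OOMs by capping the allocator's split size
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
invokeai --web
```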
@@ -348,25 +510,11 @@ version (recommended), follow these steps:
 1. Start the `invoke.sh`/`invoke.bat` launch script from within the
 `invokeai` root directory.

-2. Choose menu item (6) "Developer's Console". This will launch a new
-command line.
-
-3. Type the following command:
-
-```bash
-pip install InvokeAI --upgrade
-```
-4. Watch the installation run. Once it is complete, you may exit the
-command line by typing `exit`, and then start InvokeAI from the
-launch script as per usual.
-
-Alternatively, if you wish to get the most recent unreleased
-development version, perform the same steps to enter the developer's
-console, and then type:
-
-```bash
-pip install https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.zip
-```
+2. Choose menu item (10) "Update InvokeAI".
+
+3. This will launch a menu that gives you the option of:
+
+   1. Updating to the latest official release;
+   2. Updating to the bleeding-edge development version; or
+   3. Manually entering the tag or branch name of a version of
+      InvokeAI you wish to try out.
@@ -30,25 +30,35 @@ Installation](010_INSTALL_AUTOMATED.md), and in many cases will
 already be installed (if, for example, you have used your system for
 gaming):

-* **Python** version 3.9 or 3.10 (3.11 is not recommended).
+* **Python**
+
+    version 3.9 or 3.10 (3.11 is not recommended).

-* **CUDA Tools** For those with _NVidia GPUs_, you will need to
-  install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
+* **CUDA Tools**
+
+    For those with _NVidia GPUs_, you will need to
+    install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).

-* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
-  to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
-  InvokeAI does not support AMD GPUs on Windows systems due to
-  lack of a Windows ROCm library.
+* **ROCm Tools**
+
+    For _Linux users with AMD GPUs_, you will need
+    to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
+    InvokeAI does not support AMD GPUs on Windows systems due to
+    lack of a Windows ROCm library.

-* **Visual C++ Libraries** _Windows users_ must install the free
-  [Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
+* **Visual C++ Libraries**
+
+    _Windows users_ must install the free
+    [Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)

-* **The Xcode command line tools** for _Macintosh users_. Instructions are
-  available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
+* **The Xcode command line tools**
+
+    for _Macintosh users_. Instructions are available at
+    [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)

-* _Macintosh users_ may also need to run the `Install Certificates` command
-  if model downloads give lots of certificate errors. Run:
-  `/Applications/Python\ 3.10/Install\ Certificates.command`
+    * _Macintosh users_ may also need to run the `Install Certificates` command
+      if model downloads give lots of certificate errors. Run:
+      `/Applications/Python\ 3.10/Install\ Certificates.command`

 ### Installation Walkthrough
@@ -75,7 +85,7 @@ manager, please follow these steps:

 === "Linux/Mac"

     ```bash
-    export INVOKEAI_ROOT="~/invokeai"
+    export INVOKEAI_ROOT=~/invokeai
     mkdir $INVOKEAI_ROOT
     ```
@ -99,35 +109,30 @@ manager, please follow these steps:
Windows environment variable using the Advanced System Settings dialogue.
Refer to your operating system documentation for details.
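If you prefer the command line to the Advanced System Settings dialogue, here is a sketch of the equivalent one-liner (the path is illustrative; `setx` persists the variable for future sessions, not the current one):

```ps
setx INVOKEAI_ROOT "C:\Users\YourName\invokeai"
```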
```terminal
cd $INVOKEAI_ROOT
python -m venv .venv --prompt InvokeAI
```
4. Activate the new environment:

    === "Linux/Mac"

        ```bash
        source .venv/bin/activate
        ```

    === "Windows"

        ```ps
        .venv\Scripts\activate
        ```

        If you get a permissions error at this point, run this command and try again:

        `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`

    The command-line prompt should change to show `(InvokeAI)` at the
    beginning of the prompt. Note that all the following steps should be
    run while inside the INVOKEAI_ROOT directory.
@ -137,40 +142,47 @@ manager, please follow these steps:
```bash
python -m pip install --upgrade pip
```
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
   CUDA, ROCm and CPU/MPS drivers as shown below:

    === "CUDA (NVidia)"

        ```bash
        pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
        ```

    === "ROCm (AMD)"

        ```bash
        pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
        ```

    === "CPU (Intel Macs & non-GPU systems)"

        ```bash
        pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
        ```

    === "MPS (M1 and M2 Macs)"

        ```bash
        pip install InvokeAI --use-pep517
        ```
7. Deactivate and reactivate your runtime directory so that the invokeai-specific commands
   become available in the environment

    === "Linux/Macintosh"

        ```bash
        deactivate && source .venv/bin/activate
        ```

    === "Windows"

        ```ps
        deactivate
        .venv\Scripts\activate
        ```
8. Set up the runtime directory
@ -179,7 +191,7 @@ manager, please follow these steps:
models, model config files, directory for textual inversion embeddings, and
your outputs.

```terminal
invokeai-configure
```
@ -283,13 +295,12 @@ on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git)

1. From the command line, run this command:

    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```

    This will create a directory named `InvokeAI` and populate it with the
    full source code from the InvokeAI repository.

2. Activate the InvokeAI virtual environment as per step (4) of the manual
installation protocol (important!)
@ -304,7 +315,7 @@ installation protocol (important!)
=== "ROCm (AMD)"

    ```bash
    pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
    ```

=== "CPU (Intel Macs & non-GPU systems)"
@ -314,7 +325,7 @@ installation protocol (important!)
=== "MPS (M1 and M2 Macs)"

    ```bash
    pip install -e . --use-pep517
    ```

Be sure to pass `-e` (for an editable install) and don't forget the
@ -330,5 +341,29 @@ installation protocol (important!)
repository. You can then use GitHub functions to create and submit
pull requests to contribute improvements to the project.

Please see [Contributing](../index.md#contributing) for hints
on getting started.

### Unsupported Conda Install

Congratulations, you found the "secret" Conda installation
instructions. If you really **really** want to use Conda with InvokeAI
you can do so using this unsupported recipe:

```
mkdir ~/invokeai
conda create -n invokeai python=3.10
conda activate invokeai
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
invokeai-configure --root ~/invokeai
invokeai --root ~/invokeai --web
```

The `pip install` command shown in this recipe is for Linux/Windows
systems with an NVIDIA GPU. See step (6) above for the command to use
with other platforms/GPU combinations. If you don't wish to pass the
`--root` argument to `invokeai` with each launch, you may set the
environment variable INVOKEAI_ROOT to point to the installation
directory, as shown in the sketch below.
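A minimal sketch of that approach (bash syntax; add the `export` line to your shell profile to make it permanent):

```bash
export INVOKEAI_ROOT=~/invokeai
invokeai --web
```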
Note that if you run into problems with the Conda installation, the InvokeAI
staff will **not** be able to help you out. Caveat Emptor!
@ -110,7 +110,7 @@ recipes are available
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
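Concretely, the manual install would look something like this (the package list is a sketch; pin versions to match your InvokeAI release as needed):

```bash
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
```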
This will be done automatically for you if you use the installer
@ -43,25 +43,31 @@ InvokeAI comes with support for a good set of starter models. You'll
find them listed in the master models file
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
subset that are currently installed are found in
`configs/models.yaml`. As of v2.3.1, the list of starter models is:

|Model Name | HuggingFace Repo ID | Description | URL |
|---------- | ---------- | ----------- | --- |
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-inpainting|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-inpainting |
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |
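For orientation, an installed entry in `configs/models.yaml` looks roughly like the stanza below. This is an illustrative sketch only; the exact field names can vary between InvokeAI versions:

```yaml
stable-diffusion-1.5:
  description: Stable Diffusion version 1.5 diffusers model
  format: diffusers
  repo_id: runwayml/stable-diffusion-v1-5
```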
Note that these files are covered by an "Ethical AI" license which
forbids certain uses. When you initially download them, you are asked
to accept the license terms. In addition, some of these models carry
additional license terms that limit their use in commercial
applications or on public servers. Be sure to familiarize yourself
with the model terms by visiting the URLs in the table above.

## Community-Contributed Models
@ -80,6 +86,13 @@ only `.safetensors` and `.ckpt` models, but they can be easily loaded
into InvokeAI and/or converted into optimized `diffusers` models. Be
aware that CIVITAI hosts many models that generate NSFW content.

!!! note

    InvokeAI 2.3.x does not support directly importing and
    running Stable Diffusion version 2 checkpoint models. You may instead
    convert them into `diffusers` models using the conversion methods
    described below.

## Installation

There are multiple ways to install and manage models:
@ -90,7 +103,7 @@ There are multiple ways to install and manage models:
models files.

3. The web interface (WebUI) has a GUI for importing and managing
models.

### Installation via `invokeai-configure`
@ -106,7 +119,7 @@ confirm that the files are complete.
You can install a new model, including any of the community-supported ones, via
the command-line client's `!import_model` command.

#### Installing individual `.ckpt` and `.safetensors` models

If the model is already downloaded to your local disk, use
`!import_model /path/to/file.ckpt` to load it. For example:
@ -131,15 +144,40 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors
For this to work, the URL must not be password-protected. Otherwise
you will receive a 404 error.

When you import a legacy model, the CLI will first ask you what type
of model this is. You can indicate whether it is a model based on
Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
or a 1.x inpainting model. Be careful to indicate the correct model
type, or it will not load correctly. You can correct the model type
after the fact using the `!edit_model` command.

The system will then ask you a few other questions about the model,
including what size image it was trained on (usually 512x512), what
name and description you wish to use for it, and whether you would
like to install a custom VAE (variable autoencoder) file for the
model. For recent models, the answer to the VAE question is usually
"no," but it won't hurt to answer "yes".

After importing, the model will load. If this is successful, you will
be asked if you want to keep the model loaded in memory to start
generating immediately. You'll also be asked if you wish to make this
the default model on startup. You can change this later using
`!edit_model`.
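`!edit_model` takes the model's short name as shown in `models.yaml`; a hypothetical invocation (the model name is illustrative):

```console
invoke> !edit_model stable-diffusion-1.5
```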
#### Importing a batch of `.ckpt` and `.safetensors` models from a directory

You may also point `!import_model` to a directory containing a set of
`.ckpt` or `.safetensors` files. They will be imported _en masse_.

!!! example

    ```console
    invoke> !import_model C:/Users/fred/Downloads/civitai_models/
    ```

You will be given the option to import all models found in the
directory, or select which ones to import. If there are subfolders
within the directory, they will be searched for models to import.
#### Installing `diffusers` models
@ -279,19 +317,23 @@ After you save the modified `models.yaml` file relaunch
### Installation via the WebUI

To access the WebUI Model Manager, click on the button that looks like
a cube in the upper right side of the browser screen. This will bring
up a dialogue that lists the models you have already installed, and
allows you to load, delete or edit them:

<figure markdown>

</figure>

To add a new model, click on **+ Add New** and select to either a
checkpoint/safetensors model, or a diffusers model:

<figure markdown>

</figure>

In this example, we chose **Add Diffusers**. As shown in the figure
@ -302,7 +344,9 @@ choose to enter a path to disk, the system will autocomplete for you
as you type:

<figure markdown>

</figure>

Press **Add Model** at the bottom of the dialogue (scrolled out of
@ -317,7 +361,9 @@ directory and press the "Search" icon. This will display the
subfolders, and allow you to choose which ones to import:

<figure markdown>

</figure>

## Model Management Startup Options
@ -342,9 +388,8 @@ invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
And here is what the same argument looks like in `invokeai.init`:

```bash
--outdir="/home/fred/invokeai/outputs"
--no-nsfw_checker
--autoconvert /home/fred/stable-diffusion-checkpoints
```
@ -24,7 +24,7 @@ You need to have opencv installed so that pypatchmatch can be built:
```bash
brew install opencv
```

The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.

## Linux
@ -56,7 +56,7 @@ Prior to installing PyPatchMatch, you need to take the following steps:
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
`python`, and then at the `>>>` line type
`from patchmatch import patch_match`: It should look like the following:

```py
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
```
@ -108,4 +108,4 @@ Prior to installing PyPatchMatch, you need to take the following steps:
[**Next, Follow Steps 4-6 from the Debian Section above**](#linux)

If you see no errors you're ready to go!
@ -1,73 +0,0 @@
openapi: 3.0.3
info:
  title: Stable Diffusion
  description: |-
    TODO: Description Here

    Some useful links:
    - [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)

  license:
    name: MIT License
    url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
  version: 1.0.0
servers:
  - url: http://localhost:9090/api
tags:
  - name: images
    description: Retrieve and manage generated images
paths:
  /images/{imageId}:
    get:
      tags:
        - images
      summary: Get image by ID
      description: Returns a single image
      operationId: getImageById
      parameters:
        - name: imageId
          in: path
          description: ID of image to return
          required: true
          schema:
            type: string
      responses:
        '200':
          description: successful operation
          content:
            image/png:
              schema:
                type: string
                format: binary
        '404':
          description: Image not found
  /intermediates/{intermediateId}/{step}:
    get:
      tags:
        - images
      summary: Get intermediate image by ID
      description: Returns a single intermediate image
      operationId: getIntermediateById
      parameters:
        - name: intermediateId
          in: path
          description: ID of intermediate to return
          required: true
          schema:
            type: string
        - name: step
          in: path
          description: The generation step of the intermediate
          required: true
          schema:
            type: string
      responses:
        '200':
          description: successful operation
          content:
            image/png:
              schema:
                type: string
                format: binary
        '404':
          description: Intermediate not found
docs/other/TRANSLATION.md (new file, 19 lines)
@ -0,0 +1,19 @@
# Translation

InvokeAI uses [Weblate](https://weblate.org) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.

## Contributing

If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).

Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.

Your changes will be attributed to you in the automated PR process; you don't need to do anything else.

## Help & Questions

Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @psychedelicious or @blessedcoolant on Discord if you have any questions.

## Thanks

Thanks to the InvokeAI community for their efforts to translate the project!
@ -1,16 +0,0 @@

html {
    box-sizing: border-box;
    overflow: -moz-scrollbars-vertical;
    overflow-y: scroll;
}

*,
*:before,
*:after {
    box-sizing: inherit;
}

body {
    margin: 0;
    background: #fafafa;
}
@ -1,79 +0,0 @@
<!doctype html>
<html lang="en-US">
<head>
    <title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
    'use strict';
    function run () {
        var oauth2 = window.opener.swaggerUIRedirectOauth2;
        var sentState = oauth2.state;
        var redirectUrl = oauth2.redirectUrl;
        var isValid, qp, arr;

        if (/code|token|error/.test(window.location.hash)) {
            qp = window.location.hash.substring(1).replace('?', '&');
        } else {
            qp = location.search.substring(1);
        }

        arr = qp.split("&");
        arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
        qp = qp ? JSON.parse('{' + arr.join() + '}',
                function (key, value) {
                    return key === "" ? value : decodeURIComponent(value);
                }
        ) : {};

        isValid = qp.state === sentState;

        if ((
          oauth2.auth.schema.get("flow") === "accessCode" ||
          oauth2.auth.schema.get("flow") === "authorizationCode" ||
          oauth2.auth.schema.get("flow") === "authorization_code"
        ) && !oauth2.auth.code) {
            if (!isValid) {
                oauth2.errCb({
                    authId: oauth2.auth.name,
                    source: "auth",
                    level: "warning",
                    message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
                });
            }

            if (qp.code) {
                delete oauth2.state;
                oauth2.auth.code = qp.code;
                oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
            } else {
                let oauthErrorMsg;
                if (qp.error) {
                    oauthErrorMsg = "["+qp.error+"]: " +
                        (qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
                        (qp.error_uri ? "More info: "+qp.error_uri : "");
                }

                oauth2.errCb({
                    authId: oauth2.auth.name,
                    source: "auth",
                    level: "error",
                    message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
                });
            }
        } else {
            oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
        }
        window.close();
    }

    if (document.readyState !== 'loading') {
        run();
    } else {
        document.addEventListener('DOMContentLoaded', function () {
            run();
        });
    }
</script>
</body>
</html>
@ -1,20 +0,0 @@
window.onload = function() {
    //<editor-fold desc="Changeable Configuration Block">

    // the following lines will be replaced by docker/configurator, when it runs in a docker-container
    window.ui = SwaggerUIBundle({
        url: "openapi3_0.yaml",
        dom_id: '#swagger-ui',
        deepLinking: true,
        presets: [
            SwaggerUIBundle.presets.apis,
            SwaggerUIStandalonePreset
        ],
        plugins: [
            SwaggerUIBundle.plugins.DownloadUrl
        ],
        layout: "StandaloneLayout"
    });

    //</editor-fold>
};
@ -11,19 +11,18 @@ if [[ -v "VIRTUAL_ENV" ]]; then
    exit -1
fi

VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)")
PATCH=""
VERSION="v${VERSION}${PATCH}"
LATEST_TAG="v3.0-latest"

echo Building installer for version $VERSION
echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
RESPONSE=${input:='n'}
if [ "$RESPONSE" == 'y' ]; then

    if ! git tag $VERSION ; then
        echo "Existing/invalid tag"
@ -32,6 +31,8 @@ if [ "$RESPONSE" == 'y' ]; then
    git push origin :refs/tags/$LATEST_TAG
    git tag -fa $LATEST_TAG

    echo "remember to push --tags!"
fi

# ----------------------
@ -67,6 +67,8 @@ del /q .tmp1 .tmp2
@rem -------------- Install and Configure ---------------

call python .\lib\main.py
pause
exit /b

@rem ------------------------ Subroutines ---------------
@rem routine to do comparison of semantic version numbers
@ -9,13 +9,16 @@ cd $scriptdir
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

MINIMUM_PYTHON_VERSION=3.9.0
MAXIMUM_PYTHON_VERSION=3.11.0
PYTHON=""
for candidate in python3.10 python3.9 python3 python ; do
    if ppath=`which $candidate`; then
        python_version=$($ppath -V | awk '{ print $2 }')
        if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
            if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then
                PYTHON=$ppath
                break
            fi
        fi
    fi
done
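# Note: the version() helper above zero-pads each dotted component so that
# versions can be compared as plain integers, e.g. `version 3.10.6` prints 3010006000.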
@ -28,3 +31,4 @@ if [ -z "$PYTHON" ]; then
fi

exec $PYTHON ./lib/main.py ${@}
read -p "Press any key to exit"
@ -291,7 +291,7 @@ class InvokeAiInstance:
            src = Path(__file__).parents[1].expanduser().resolve()
            # if the above directory contains one of these files, we'll do a source install
            next(src.glob("pyproject.toml"))
            next(src.glob("invokeai"))
        except StopIteration:
            print("Unable to find a wheel or perform a source install. Giving up.")
@ -336,17 +336,32 @@ class InvokeAiInstance:
            elif el in ['-y','--yes','--yes-to-all']:
                new_argv.append(el)
        sys.argv = new_argv

        import requests  # to catch download exceptions
        from messages import introduction

        introduction()

        from invokeai.frontend.install import invokeai_configure

        # NOTE: currently the config script does its own arg parsing! this means the command-line switches
        # from the installer will also automatically propagate down to the config script.
        # this may change in the future with config refactoring!
        succeeded = False
        try:
            invokeai_configure()
            succeeded = True
        except requests.exceptions.ConnectionError as e:
            print(f'\nA network error was encountered during configuration and download: {str(e)}')
        except OSError as e:
            print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
        except Exception as e:
            print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
        finally:
            if not succeeded:
                print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
                print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
                print('Alternatively you can relaunch the installer.')

    def install_user_scripts(self):
        """
@ -6,15 +6,20 @@ setlocal
call .venv\Scripts\activate.bat
set INVOKEAI_ROOT=.

:start
echo Do you want to generate images using the
echo 1. command-line interface
echo 2. browser-based UI
echo 3. run textual inversion training
echo 4. merge models (diffusers type only)
echo 5. download and install models
echo 6. change InvokeAI startup options
echo 7. re-run the configure script to fix a broken install
echo 8. open the developer console
echo 9. update InvokeAI
echo 10. command-line help
echo Q - quit
set /P restore="Please enter 1-10, Q: [2] "
if not defined restore set restore=2
IF /I "%restore%" == "1" (
    echo Starting the InvokeAI command-line..
@ -24,14 +29,20 @@ IF /I "%restore%" == "1" (
    python .venv\Scripts\invokeai.exe --web %*
) ELSE IF /I "%restore%" == "3" (
    echo Starting textual inversion training..
    python .venv\Scripts\invokeai-ti.exe --gui
) ELSE IF /I "%restore%" == "4" (
    echo Starting model merging script..
    python .venv\Scripts\invokeai-merge.exe --gui
) ELSE IF /I "%restore%" == "5" (
    echo Running invokeai-model-install...
    python .venv\Scripts\invokeai-model-install.exe
) ELSE IF /I "%restore%" == "6" (
    echo Running invokeai-configure...
    python .venv\Scripts\invokeai-configure.exe --skip-sd-weights --skip-support-models
) ELSE IF /I "%restore%" == "7" (
    echo Running invokeai-configure...
    python .venv\Scripts\invokeai-configure.exe --yes --default_only
) ELSE IF /I "%restore%" == "8" (
    echo Developer Console
    echo Python command is:
    where python
@ -43,14 +54,27 @@ IF /I "%restore%" == "1" (
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE IF /I "%restore%" == "9" (
    echo Running invokeai-update...
    python .venv\Scripts\invokeai-update.exe %*
) ELSE IF /I "%restore%" == "10" (
    echo Displaying command line help...
    python .venv\Scripts\invokeai.exe --help %*
    pause
    exit /b
) ELSE IF /I "%restore%" == "q" (
    echo Goodbye!
    goto ending
) ELSE (
    echo Invalid selection
    pause
    exit /b
)
goto start

endlocal
pause

:ending
exit /b
@ -25,49 +25,69 @@ if [ "$(uname -s)" == "Darwin" ]; then
fi

if [ "$0" != "bash" ]; then
    while true
    do
        echo "Do you want to generate images using the"
        echo "1. command-line interface"
        echo "2. browser-based UI"
        echo "3. run textual inversion training"
        echo "4. merge models (diffusers type only)"
        echo "5. download and install models"
        echo "6. change InvokeAI startup options"
        echo "7. re-run the configure script to fix a broken install"
        echo "8. open the developer console"
        echo "9. update InvokeAI"
        echo "10. command-line help"
        echo "Q - Quit"
        echo ""
        read -p "Please enter 1-10, Q: [2] " yn
        choice=${yn:='2'}
        case $choice in
            1)
                echo "Starting the InvokeAI command-line..."
                invokeai $@
                ;;
            2)
                echo "Starting the InvokeAI browser-based UI..."
                invokeai --web $@
                ;;
            3)
                echo "Starting Textual Inversion:"
                invokeai-ti --gui $@
                ;;
            4)
                echo "Merging Models:"
                invokeai-merge --gui $@
                ;;
            5)
                invokeai-model-install --root ${INVOKEAI_ROOT}
                ;;
            6)
                invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
                ;;
            7)
                invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
                ;;
            8)
                echo "Developer Console:"
                file_name=$(basename "${BASH_SOURCE[0]}")
                bash --init-file "$file_name"
                ;;
            9)
                echo "Update:"
                invokeai-update
                ;;
            10)
                invokeai --help
                ;;
            [qQ])
                exit 0
                ;;
            *)
                echo "Invalid selection"
                exit;;
        esac
    done
else # in developer console
    python --version
    echo "Press ^D to exit"
@ -1,3 +1,11 @@
Organization of the source tree:

app      -- Home of nodes invocations and services
assets   -- Images and other data files used by InvokeAI
backend  -- Non-user facing libraries, including the rendering core.
configs  -- Configuration files used at install and run times
frontend -- User-facing scripts, including the CLI and the WebUI
version  -- Current InvokeAI version string, stored in version/invokeai_version.py
invokeai/app/api/dependencies.py (new file, 79 lines)
@ -0,0 +1,79 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import os
from argparse import Namespace

from ...backend import Globals
from ..services.model_manager_initializer import get_model_manager
from ..services.restoration_services import RestorationServices
from ..services.graph import GraphExecutionState
from ..services.image_storage import DiskImageStorage
from ..services.invocation_queue import MemoryInvocationQueue
from ..services.invocation_services import InvocationServices
from ..services.invoker import Invoker
from ..services.processor import DefaultInvocationProcessor
from ..services.sqlite import SqliteItemStorage
from .events import FastAPIEventService


# TODO: is there a better way to achieve this?
def check_internet() -> bool:
    """
    Return true if the internet is reachable.
    It does this by pinging huggingface.co.
    """
    import urllib.request

    host = "http://huggingface.co"
    try:
        urllib.request.urlopen(host, timeout=1)
        return True
    except:
        return False


class ApiDependencies:
    """Contains and initializes all dependencies for the API"""

    invoker: Invoker = None

    @staticmethod
    def initialize(config, event_handler_id: int):
        Globals.try_patchmatch = config.patchmatch
        Globals.always_use_cpu = config.always_use_cpu
        Globals.internet_available = config.internet_available and check_internet()
        Globals.disable_xformers = not config.xformers
        Globals.ckpt_convert = config.ckpt_convert

        # TODO: Use a logger
        print(f">> Internet connectivity is {Globals.internet_available}")

        events = FastAPIEventService(event_handler_id)

        output_folder = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "../../../../outputs")
        )

        images = DiskImageStorage(output_folder)

        # TODO: build a file/path manager?
        db_location = os.path.join(output_folder, "invokeai.db")

        services = InvocationServices(
            model_manager=get_model_manager(config),
            events=events,
            images=images,
            queue=MemoryInvocationQueue(),
            graph_execution_manager=SqliteItemStorage[GraphExecutionState](
                filename=db_location, table_name="graph_executions"
            ),
            processor=DefaultInvocationProcessor(),
            restoration=RestorationServices(config),
        )

        ApiDependencies.invoker = Invoker(services)

    @staticmethod
    def shutdown():
        if ApiDependencies.invoker:
            ApiDependencies.invoker.stop()
invokeai/app/api/events.py (new file, 52 lines)
@ -0,0 +1,52 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import asyncio
import threading
from queue import Empty, Queue
from typing import Any

from fastapi_events.dispatcher import dispatch

from ..services.events import EventServiceBase


class FastAPIEventService(EventServiceBase):
    event_handler_id: int
    __queue: Queue
    __stop_event: threading.Event

    def __init__(self, event_handler_id: int) -> None:
        self.event_handler_id = event_handler_id
        self.__queue = Queue()
        self.__stop_event = threading.Event()
        asyncio.create_task(self.__dispatch_from_queue(stop_event=self.__stop_event))

        super().__init__()

    def stop(self, *args, **kwargs):
        self.__stop_event.set()
        self.__queue.put(None)

    def dispatch(self, event_name: str, payload: Any) -> None:
        self.__queue.put(dict(event_name=event_name, payload=payload))

    async def __dispatch_from_queue(self, stop_event: threading.Event):
        """Get events from the queue and dispatch them, from the correct thread"""
        while not stop_event.is_set():
            try:
                event = self.__queue.get(block=False)
                if not event:  # Probably stopping
                    continue

                dispatch(
                    event.get("event_name"),
                    payload=event.get("payload"),
                    middleware_id=self.event_handler_id,
                )

            except Empty:
                await asyncio.sleep(0.001)
                pass

            except asyncio.CancelledError as e:
                raise e  # Raise a proper error
invokeai/app/api/routers/images.py (new file, 56 lines)
@ -0,0 +1,56 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from datetime import datetime, timezone
from io import BytesIO

from fastapi import Path, Request, UploadFile
from fastapi.responses import FileResponse, Response
from fastapi.routing import APIRouter
from PIL import Image

from ...services.image_storage import ImageType
from ..dependencies import ApiDependencies

images_router = APIRouter(prefix="/v1/images", tags=["images"])


@images_router.get("/{image_type}/{image_name}", operation_id="get_image")
async def get_image(
    image_type: ImageType = Path(description="The type of image to get"),
    image_name: str = Path(description="The name of the image to get"),
):
    """Gets a result"""
    # TODO: This is not really secure at all. At least make sure only output results are served
    filename = ApiDependencies.invoker.services.images.get_path(image_type, image_name)
    return FileResponse(filename)


@images_router.post(
    "/uploads/",
    operation_id="upload_image",
    responses={
        201: {"description": "The image was uploaded successfully"},
        404: {"description": "Session not found"},
    },
)
async def upload_image(file: UploadFile, request: Request):
    if not file.content_type.startswith("image"):
        return Response(status_code=415)

    contents = await file.read()
    try:
        # wrap the raw bytes in a file-like object; Image.open() cannot read bytes directly
        im = Image.open(BytesIO(contents))
    except:
        # Error opening the image
        return Response(status_code=415)

    filename = f"{str(int(datetime.now(timezone.utc).timestamp()))}.png"
    ApiDependencies.invoker.services.images.save(ImageType.UPLOAD, filename, im)

    return Response(
        status_code=201,
        headers={
            "Location": request.url_for(
                "get_image", image_type=ImageType.UPLOAD, image_name=filename
            )
        },
    )
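Once the API server is running, these endpoints can be exercised from the command line. A hypothetical smoke test, assuming the server listens at the `http://localhost:9090/api` base URL from the OpenAPI spec above and that `test.png` exists locally:

```bash
# upload an image; a 201 response carries a Location header pointing at the stored copy
curl -i -X POST -F "file=@test.png" http://localhost:9090/api/v1/images/uploads/
```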
invokeai/app/api/routers/sessions.py (new file, 287 lines)
@ -0,0 +1,287 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Annotated, List, Optional, Union

from fastapi import Body, Path, Query
from fastapi.responses import Response
from fastapi.routing import APIRouter
from pydantic.fields import Field

from ...invocations import *
from ...invocations.baseinvocation import BaseInvocation
from ...services.graph import (
    Edge,
    EdgeConnection,
    Graph,
    GraphExecutionState,
    NodeAlreadyExecutedError,
)
from ...services.item_storage import PaginatedResults
from ..dependencies import ApiDependencies

session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"])


@session_router.post(
    "/",
    operation_id="create_session",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid json"},
    },
)
async def create_session(
    graph: Optional[Graph] = Body(
        default=None, description="The graph to initialize the session with"
    )
) -> GraphExecutionState:
    """Creates a new session, optionally initializing it with an invocation graph"""
    session = ApiDependencies.invoker.create_execution_state(graph)
    return session


@session_router.get(
    "/",
    operation_id="list_sessions",
    responses={200: {"model": PaginatedResults[GraphExecutionState]}},
)
async def list_sessions(
    page: int = Query(default=0, description="The page of results to get"),
    per_page: int = Query(default=10, description="The number of results per page"),
    query: str = Query(default="", description="The query string to search for"),
) -> PaginatedResults[GraphExecutionState]:
    """Gets a list of sessions, optionally searching"""
    # the original compared the builtin `filter` here; the intent is clearly the `query` parameter
    if query == "":
        result = ApiDependencies.invoker.services.graph_execution_manager.list(
            page, per_page
        )
    else:
        result = ApiDependencies.invoker.services.graph_execution_manager.search(
            query, page, per_page
        )
    return result


@session_router.get(
    "/{session_id}",
    operation_id="get_session",
    responses={
        200: {"model": GraphExecutionState},
        404: {"description": "Session not found"},
    },
)
async def get_session(
    session_id: str = Path(description="The id of the session to get"),
) -> GraphExecutionState:
    """Gets a session"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)
    else:
        return session


@session_router.post(
    "/{session_id}/nodes",
    operation_id="add_node",
    responses={
        200: {"model": str},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def add_node(
    session_id: str = Path(description="The id of the session"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The node to add"),
) -> str:
    """Adds a node to the graph"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    try:
        session.add_node(node)
        ApiDependencies.invoker.services.graph_execution_manager.set(
            session
        )  # TODO: can this be done automatically, or add node through an API?
        return session.id
    except NodeAlreadyExecutedError:
        return Response(status_code=400)
    except IndexError:
        return Response(status_code=400)


@session_router.put(
    "/{session_id}/nodes/{node_path}",
    operation_id="update_node",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def update_node(
    session_id: str = Path(description="The id of the session"),
    node_path: str = Path(description="The path to the node in the graph"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The new node"),
) -> GraphExecutionState:
    """Updates a node in the graph and removes all linked edges"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    try:
        session.update_node(node_path, node)
        ApiDependencies.invoker.services.graph_execution_manager.set(
            session
        )  # TODO: can this be done automatically, or add node through an API?
        return session
    except NodeAlreadyExecutedError:
        return Response(status_code=400)
    except IndexError:
        return Response(status_code=400)


@session_router.delete(
    "/{session_id}/nodes/{node_path}",
    operation_id="delete_node",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def delete_node(
    session_id: str = Path(description="The id of the session"),
|
||||||
|
node_path: str = Path(description="The path to the node to delete"),
|
||||||
|
) -> GraphExecutionState:
|
||||||
|
"""Deletes a node in the graph and removes all linked edges"""
|
||||||
|
session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
|
||||||
|
if session is None:
|
||||||
|
return Response(status_code=404)
|
||||||
|
|
||||||
|
try:
|
||||||
|
session.delete_node(node_path)
|
||||||
|
ApiDependencies.invoker.services.graph_execution_manager.set(
|
||||||
|
session
|
||||||
|
) # TODO: can this be done automatically, or add node through an API?
|
||||||
|
return session
|
||||||
|
except NodeAlreadyExecutedError:
|
||||||
|
return Response(status_code=400)
|
||||||
|
except IndexError:
|
||||||
|
return Response(status_code=400)
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.post(
|
||||||
|
"/{session_id}/edges",
|
||||||
|
operation_id="add_edge",
|
||||||
|
responses={
|
||||||
|
200: {"model": GraphExecutionState},
|
||||||
|
400: {"description": "Invalid node or link"},
|
||||||
|
404: {"description": "Session not found"},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def add_edge(
|
||||||
|
session_id: str = Path(description="The id of the session"),
|
||||||
|
edge: Edge = Body(description="The edge to add"),
|
||||||
|
) -> GraphExecutionState:
|
||||||
|
"""Adds an edge to the graph"""
|
||||||
|
session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
|
||||||
|
if session is None:
|
||||||
|
return Response(status_code=404)
|
||||||
|
|
||||||
|
try:
|
||||||
|
session.add_edge(edge)
|
||||||
|
ApiDependencies.invoker.services.graph_execution_manager.set(
|
||||||
|
session
|
||||||
|
) # TODO: can this be done automatically, or add node through an API?
|
||||||
|
return session
|
||||||
|
except NodeAlreadyExecutedError:
|
||||||
|
return Response(status_code=400)
|
||||||
|
except IndexError:
|
||||||
|
return Response(status_code=400)
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: the edge being in the path here is really ugly, find a better solution
|
||||||
|
@session_router.delete(
|
||||||
|
"/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}",
|
||||||
|
operation_id="delete_edge",
|
||||||
|
responses={
|
||||||
|
200: {"model": GraphExecutionState},
|
||||||
|
400: {"description": "Invalid node or link"},
|
||||||
|
404: {"description": "Session not found"},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def delete_edge(
|
||||||
|
session_id: str = Path(description="The id of the session"),
|
||||||
|
from_node_id: str = Path(description="The id of the node the edge is coming from"),
|
||||||
|
from_field: str = Path(description="The field of the node the edge is coming from"),
|
||||||
|
to_node_id: str = Path(description="The id of the node the edge is going to"),
|
||||||
|
to_field: str = Path(description="The field of the node the edge is going to"),
|
||||||
|
) -> GraphExecutionState:
|
||||||
|
"""Deletes an edge from the graph"""
|
||||||
|
session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
|
||||||
|
if session is None:
|
||||||
|
return Response(status_code=404)
|
||||||
|
|
||||||
|
try:
|
||||||
|
edge = Edge(
|
||||||
|
source=EdgeConnection(node_id=from_node_id, field=from_field),
|
||||||
|
destination=EdgeConnection(node_id=to_node_id, field=to_field)
|
||||||
|
)
|
||||||
|
session.delete_edge(edge)
|
||||||
|
ApiDependencies.invoker.services.graph_execution_manager.set(
|
||||||
|
session
|
||||||
|
) # TODO: can this be done automatically, or add node through an API?
|
||||||
|
return session
|
||||||
|
except NodeAlreadyExecutedError:
|
||||||
|
return Response(status_code=400)
|
||||||
|
except IndexError:
|
||||||
|
return Response(status_code=400)
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.put(
|
||||||
|
"/{session_id}/invoke",
|
||||||
|
operation_id="invoke_session",
|
||||||
|
responses={
|
||||||
|
200: {"model": None},
|
||||||
|
202: {"description": "The invocation is queued"},
|
||||||
|
400: {"description": "The session has no invocations ready to invoke"},
|
||||||
|
404: {"description": "Session not found"},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def invoke_session(
|
||||||
|
session_id: str = Path(description="The id of the session to invoke"),
|
||||||
|
all: bool = Query(
|
||||||
|
default=False, description="Whether or not to invoke all remaining invocations"
|
||||||
|
),
|
||||||
|
) -> None:
|
||||||
|
"""Invokes a session"""
|
||||||
|
session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
|
||||||
|
if session is None:
|
||||||
|
return Response(status_code=404)
|
||||||
|
|
||||||
|
if session.is_complete():
|
||||||
|
return Response(status_code=400)
|
||||||
|
|
||||||
|
ApiDependencies.invoker.invoke(session, invoke_all=all)
|
||||||
|
return Response(status_code=202)
|
||||||
|
|
||||||
|
|
||||||
|
@session_router.delete(
|
||||||
|
"/{session_id}/invoke",
|
||||||
|
operation_id="cancel_session_invoke",
|
||||||
|
responses={
|
||||||
|
202: {"description": "The invocation is canceled"}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
async def cancel_session_invoke(
|
||||||
|
session_id: str = Path(description="The id of the session to cancel"),
|
||||||
|
) -> None:
|
||||||
|
"""Invokes a session"""
|
||||||
|
ApiDependencies.invoker.cancel(session_id)
|
||||||
|
return Response(status_code=202)
|
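An end-to-end sketch of the session lifecycle this router exposes, using `requests`. The port (9090) matches the uvicorn config in api_app.py below, the "/api" prefix comes from how the router is mounted there, and the node payload assumes the "txt2img" invocation defined in generate.py later in this diff.

import requests

BASE = "http://localhost:9090/api/v1/sessions"

session = requests.post(f"{BASE}/").json()       # create_session
sid = session["id"]

node = {"type": "txt2img", "id": "1", "prompt": "a photo of a cat"}
requests.post(f"{BASE}/{sid}/nodes", json=node)  # add_node returns the session id

requests.put(f"{BASE}/{sid}/invoke?all=true")    # invoke_session -> 202 Accepted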
invokeai/app/api/sockets.py (new file, 38 lines) @@ -0,0 +1,38 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from fastapi import FastAPI
from fastapi_events.handlers.local import local_handler
from fastapi_events.typing import Event
from fastapi_socketio import SocketManager

from ..services.events import EventServiceBase


class SocketIO:
    __sio: SocketManager

    def __init__(self, app: FastAPI):
        self.__sio = SocketManager(app=app)
        self.__sio.on("subscribe", handler=self._handle_sub)
        self.__sio.on("unsubscribe", handler=self._handle_unsub)

        local_handler.register(
            event_name=EventServiceBase.session_event, _func=self._handle_session_event
        )

    async def _handle_session_event(self, event: Event):
        await self.__sio.emit(
            event=event[1]["event"],
            data=event[1]["data"],
            room=event[1]["data"]["graph_execution_state_id"],
        )

    async def _handle_sub(self, sid, data, *args, **kwargs):
        if "session" in data:
            self.__sio.enter_room(sid, data["session"])

    # @app.sio.on('unsubscribe')

    async def _handle_unsub(self, sid, data, *args, **kwargs):
        if "session" in data:
            self.__sio.leave_room(sid, data["session"])
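A hedged sketch of the client side of this event channel, using the standard python-socketio client package. The socketio_path shown matches fastapi_socketio's defaults (mount location "/ws"), but that is an assumption; the event names delivered to the room depend on EventServiceBase, which is outside this excerpt.

import socketio

sio = socketio.Client()
sio.connect("http://localhost:9090", socketio_path="/ws/socket.io")  # path is an assumption
sio.emit("subscribe", {"session": "some-session-id"})  # joins the room for that session's events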
invokeai/app/api_app.py (new file, 158 lines) @@ -0,0 +1,158 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import asyncio
from inspect import signature

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
from fastapi.openapi.utils import get_openapi
from fastapi.staticfiles import StaticFiles
from fastapi_events.handlers.local import local_handler
from fastapi_events.middleware import EventHandlerASGIMiddleware
from pydantic.schema import schema

from ..backend import Args
from .api.dependencies import ApiDependencies
from .api.routers import images, sessions
from .api.sockets import SocketIO
from .invocations import *
from .invocations.baseinvocation import BaseInvocation

# Create the app
# TODO: create this all in a method so configuration/etc. can be passed in?
app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None)

# Add event handler
event_handler_id: int = id(app)
app.add_middleware(
    EventHandlerASGIMiddleware,
    handlers=[
        local_handler
    ],  # TODO: consider doing this in services to support different configurations
    middleware_id=event_handler_id,
)

# Add CORS
# TODO: use configuration for this
origins = []
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

socket_io = SocketIO(app)

config = {}


# Add startup event to load dependencies
@app.on_event("startup")
async def startup_event():
    config = Args()
    config.parse_args()

    ApiDependencies.initialize(
        config=config, event_handler_id=event_handler_id
    )


# Shut down threads
@app.on_event("shutdown")
async def shutdown_event():
    ApiDependencies.shutdown()


# Include all routers
# TODO: REMOVE
# app.include_router(
#     invocation.invocation_router,
#     prefix = '/api')

app.include_router(sessions.session_router, prefix="/api")

app.include_router(images.images_router, prefix="/api")


# Build a custom OpenAPI to include all outputs
# TODO: can outputs be included on metadata of invocation schemas somehow?
def custom_openapi():
    if app.openapi_schema:
        return app.openapi_schema
    openapi_schema = get_openapi(
        title=app.title,
        description="An API for invoking AI image operations",
        version="1.0.0",
        routes=app.routes,
    )

    # Add all outputs
    all_invocations = BaseInvocation.get_invocations()
    output_types = set()
    output_type_titles = dict()
    for invoker in all_invocations:
        output_type = signature(invoker.invoke).return_annotation
        output_types.add(output_type)

    output_schemas = schema(output_types, ref_prefix="#/components/schemas/")
    for schema_key, output_schema in output_schemas["definitions"].items():
        openapi_schema["components"]["schemas"][schema_key] = output_schema

        # TODO: note that we assume the schema_key here is the TYPE.__name__
        # This could break in some cases, figure out a better way to do it
        output_type_titles[schema_key] = output_schema["title"]

    # Add a reference to the output type to additionalProperties of the invoker schema
    for invoker in all_invocations:
        invoker_name = invoker.__name__
        output_type = signature(invoker.invoke).return_annotation
        output_type_title = output_type_titles[output_type.__name__]
        invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
        outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}

        invoker_schema["output"] = outputs_ref

    app.openapi_schema = openapi_schema
    return app.openapi_schema


app.openapi = custom_openapi

# Override API doc favicons
app.mount("/static", StaticFiles(directory="static/dream_web"), name="static")


@app.get("/docs", include_in_schema=False)
def overridden_swagger():
    return get_swagger_ui_html(
        openapi_url=app.openapi_url,
        title=app.title,
        swagger_favicon_url="/static/favicon.ico",
    )


@app.get("/redoc", include_in_schema=False)
def overridden_redoc():
    return get_redoc_html(
        openapi_url=app.openapi_url,
        title=app.title,
        redoc_favicon_url="/static/favicon.ico",
    )


def invoke_api():
    # Start our own event loop for eventing usage
    # TODO: determine if there's a better way to do this
    loop = asyncio.new_event_loop()
    config = uvicorn.Config(app=app, host="0.0.0.0", port=9090, loop=loop)
    # Pass access_log=False to uvicorn.Config above to silence per-request logging

    server = uvicorn.Server(config)
    loop.run_until_complete(server.serve())


if __name__ == "__main__":
    invoke_api()
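A small sketch verifying the custom OpenAPI schema built above; it calls custom_openapi() directly rather than starting the server. The "TextToImageInvocation" key matches the invocation defined in generate.py later in this diff, and its "output" entry should reference the ImageOutput schema.

spec = custom_openapi()
print(spec["components"]["schemas"]["TextToImageInvocation"]["output"])
# -> {'$ref': '#/components/schemas/ImageOutput'}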
invokeai/app/cli/commands.py (new file, 202 lines) @@ -0,0 +1,202 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from abc import ABC, abstractmethod
import argparse
from typing import Any, Callable, Iterable, Literal, Union, get_args, get_origin, get_type_hints
from pydantic import BaseModel, Field

from ..invocations.image import ImageField
from ..services.graph import GraphExecutionState
from ..services.invoker import Invoker


def add_parsers(
    subparsers,
    commands: list[type],
    command_field: str = "type",
    exclude_fields: list[str] = ["id", "type"],
    add_arguments: Callable[[argparse.ArgumentParser], None] | None = None
):
    """Adds parsers for each command to the subparsers"""

    # Create subparsers for each command
    for command in commands:
        hints = get_type_hints(command)
        cmd_name = get_args(hints[command_field])[0]
        command_parser = subparsers.add_parser(cmd_name, help=command.__doc__)

        if add_arguments is not None:
            add_arguments(command_parser)

        # Convert all fields to arguments
        fields = command.__fields__  # type: ignore
        for name, field in fields.items():
            if name in exclude_fields:
                continue

            if get_origin(field.type_) == Literal:
                allowed_values = get_args(field.type_)
                allowed_types = set()
                for val in allowed_values:
                    allowed_types.add(type(val))
                allowed_types_list = list(allowed_types)
                field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list]  # type: ignore

                command_parser.add_argument(
                    f"--{name}",
                    dest=name,
                    type=field_type,
                    default=field.default,
                    choices=allowed_values,
                    help=field.field_info.description,
                )
            else:
                command_parser.add_argument(
                    f"--{name}",
                    dest=name,
                    type=field.type_,
                    default=field.default,
                    help=field.field_info.description,
                )


class CliContext:
    invoker: Invoker
    session: GraphExecutionState
    parser: argparse.ArgumentParser
    defaults: dict[str, Any]

    def __init__(self, invoker: Invoker, session: GraphExecutionState, parser: argparse.ArgumentParser):
        self.invoker = invoker
        self.session = session
        self.parser = parser
        self.defaults = dict()

    def get_session(self):
        self.session = self.invoker.services.graph_execution_manager.get(self.session.id)
        return self.session


class ExitCli(Exception):
    """Exception to exit the CLI"""
    pass


class BaseCommand(ABC, BaseModel):
    """A CLI command"""

    # All commands must include a type name like this:
    # type: Literal['your_command_name'] = 'your_command_name'

    @classmethod
    def get_all_subclasses(cls):
        subclasses = []
        toprocess = [cls]
        while len(toprocess) > 0:
            next = toprocess.pop(0)
            next_subclasses = next.__subclasses__()
            subclasses.extend(next_subclasses)
            toprocess.extend(next_subclasses)
        return subclasses

    @classmethod
    def get_commands(cls):
        return tuple(BaseCommand.get_all_subclasses())

    @classmethod
    def get_commands_map(cls):
        # Get the type strings out of the literals and into a dictionary
        return dict(map(lambda t: (get_args(get_type_hints(t)['type'])[0], t), BaseCommand.get_all_subclasses()))

    @abstractmethod
    def run(self, context: CliContext) -> None:
        """Run the command. Raise ExitCli to exit."""
        pass


class ExitCommand(BaseCommand):
    """Exits the CLI"""
    type: Literal['exit'] = 'exit'

    def run(self, context: CliContext) -> None:
        raise ExitCli()


class HelpCommand(BaseCommand):
    """Shows help"""
    type: Literal['help'] = 'help'

    def run(self, context: CliContext) -> None:
        context.parser.print_help()


def get_graph_execution_history(
    graph_execution_state: GraphExecutionState,
) -> Iterable[str]:
    """Gets the history of fully-executed invocations for a graph execution"""
    return (
        n
        for n in reversed(graph_execution_state.executed_history)
        if n in graph_execution_state.graph.nodes
    )


def get_invocation_command(invocation) -> str:
    fields = invocation.__fields__.items()
    type_hints = get_type_hints(type(invocation))
    command = [invocation.type]
    for name, field in fields:
        if name in ["id", "type"]:
            continue

        # TODO: add links

        # Skip image fields when serializing command
        type_hint = type_hints.get(name) or None
        if type_hint is ImageField or ImageField in get_args(type_hint):
            continue

        field_value = getattr(invocation, name)
        field_default = field.default
        if field_value != field_default:
            if type_hint is str or str in get_args(type_hint):
                command.append(f'--{name} "{field_value}"')
            else:
                command.append(f"--{name} {field_value}")

    return " ".join(command)


class HistoryCommand(BaseCommand):
    """Shows the invocation history"""
    type: Literal['history'] = 'history'

    # Inputs
    # fmt: off
    count: int = Field(default=5, gt=0, description="The number of history entries to show")
    # fmt: on

    def run(self, context: CliContext) -> None:
        history = list(get_graph_execution_history(context.get_session()))
        for i in range(min(self.count, len(history))):
            entry_id = history[-1 - i]
            entry = context.get_session().graph.get_node(entry_id)
            print(f"{entry_id}: {get_invocation_command(entry)}")


class SetDefaultCommand(BaseCommand):
    """Sets a default value for a field"""
    type: Literal['default'] = 'default'

    # Inputs
    # fmt: off
    field: str = Field(description="The field to set the default for")
    value: str = Field(description="The value to set the default to, or None to clear the default")
    # fmt: on

    def run(self, context: CliContext) -> None:
        if self.value is None:
            if self.field in context.defaults:
                del context.defaults[self.field]
        else:
            context.defaults[self.field] = self.value
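For illustration, here is how a new CLI command plugs into this system: subclass BaseCommand, declare the discriminating `type` literal the comments above describe, and implement run(). It is discovered automatically via get_all_subclasses(); nothing in this block is part of the PR itself.

class ClearDefaultsCommand(BaseCommand):
    """Clears all field defaults (hypothetical example command)"""
    type: Literal['clear_defaults'] = 'clear_defaults'

    def run(self, context: CliContext) -> None:
        context.defaults.clear()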
invokeai/app/cli/completer.py (new file, 167 lines) @@ -0,0 +1,167 @@
"""
Readline helper functions for cli_app.py
You may import the global singleton `completer` to get access to the
completer object.
"""
import atexit
import readline
import shlex

from pathlib import Path
from typing import List, Dict, Literal, get_args, get_type_hints, get_origin

from ...backend import ModelManager, Globals
from ..invocations.baseinvocation import BaseInvocation
from .commands import BaseCommand

# singleton object, class variable
completer = None

class Completer(object):

    def __init__(self, model_manager: ModelManager):
        self.commands = self.get_commands()
        self.matches = None
        self.linebuffer = None
        self.manager = model_manager
        return

    def complete(self, text, state):
        """
        Complete commands and switches from the node CLI command line.
        Switches are determined in a context-specific manner.
        """

        buffer = readline.get_line_buffer()
        if state == 0:
            options = None
            try:
                current_command, current_switch = self.get_current_command(buffer)
                options = self.get_command_options(current_command, current_switch)
            except IndexError:
                pass
            options = options or list(self.parse_commands().keys())

            if not text:  # first time
                self.matches = options
            else:
                self.matches = [s for s in options if s and s.startswith(text)]

        try:
            match = self.matches[state]
        except IndexError:
            match = None
        return match

    @classmethod
    def get_commands(cls) -> List[object]:
        """
        Return a list of all the client commands and invocations.
        """
        return BaseCommand.get_commands() + BaseInvocation.get_invocations()

    def get_current_command(self, buffer: str) -> tuple[str, str]:
        """
        Parse the readline buffer to find the most recent command and its switch.
        """
        if len(buffer) == 0:
            return None, None
        tokens = shlex.split(buffer)
        command = None
        switch = None
        for t in tokens:
            if t[0].isalpha():
                if switch is None:
                    command = t
            else:
                switch = t
        # don't try to autocomplete switches that are already complete
        if switch and buffer.endswith(' '):
            switch = None
        return command or '', switch or ''

    def parse_commands(self) -> Dict[str, List[str]]:
        """
        Return a dict in which the keys are the command name
        and the values are the parameters the command takes.
        """
        result = dict()
        for command in self.commands:
            hints = get_type_hints(command)
            name = get_args(hints['type'])[0]
            result.update({name: hints})
        return result

    def get_command_options(self, command: str, switch: str) -> List[str]:
        """
        Return all the parameters that can be passed to the command as
        command-line switches. Returns None if the command is unrecognized.
        """
        parsed_commands = self.parse_commands()
        if command not in parsed_commands:
            return None

        # handle switches in the format "-foo=bar"
        argument = None
        if switch and '=' in switch:
            switch, argument = switch.split('=')

        parameter = switch.strip('-')
        if parameter in parsed_commands[command]:
            if argument is None:
                return self.get_parameter_options(parameter, parsed_commands[command][parameter])
            else:
                return [f"--{parameter}={x}" for x in self.get_parameter_options(parameter, parsed_commands[command][parameter])]
        else:
            return [f"--{x}" for x in parsed_commands[command].keys()]

    def get_parameter_options(self, parameter: str, typehint) -> List[str]:
        """
        Given a parameter type (such as Literal), offers autocompletions.
        """
        if get_origin(typehint) == Literal:
            return get_args(typehint)
        if parameter == 'model':
            return self.manager.model_names()

    def _pre_input_hook(self):
        if self.linebuffer:
            readline.insert_text(self.linebuffer)
            readline.redisplay()
            self.linebuffer = None


def set_autocompleter(model_manager: ModelManager) -> Completer:
    global completer

    if completer:
        return completer

    completer = Completer(model_manager)

    readline.set_completer(completer.complete)
    # pyreadline3 does not have a set_auto_history() method
    try:
        readline.set_auto_history(True)
    except AttributeError:
        pass
    readline.set_pre_input_hook(completer._pre_input_hook)
    readline.set_completer_delims(" ")
    readline.parse_and_bind("tab: complete")
    readline.parse_and_bind("set print-completions-horizontally off")
    readline.parse_and_bind("set page-completions on")
    readline.parse_and_bind("set skip-completed-text on")
    readline.parse_and_bind("set show-all-if-ambiguous on")

    histfile = Path(Globals.root, ".invoke_history")
    try:
        readline.read_history_file(histfile)
        readline.set_history_length(1000)
    except FileNotFoundError:
        pass
    except OSError:  # file likely corrupted
        newname = f"{histfile}.old"
        print(
            f"## Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
        )
        histfile.replace(Path(newname))
    atexit.register(readline.write_history_file, histfile)
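A usage sketch wiring the completer into a REPL, assuming a ModelManager instance `mm` is already constructed (cli_app.py below does exactly this via get_model_manager(config)):

completer = set_autocompleter(mm)  # installs readline tab-completion once
line = input("invoke> ")           # TAB now completes command names and --switches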
invokeai/app/cli_app.py (new file, 282 lines) @@ -0,0 +1,282 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import argparse
import os
import shlex
import time
from typing import Union, get_type_hints

from pydantic import BaseModel
from pydantic.fields import Field

from ..backend import Args
from .cli.commands import BaseCommand, CliContext, ExitCli, add_parsers, get_graph_execution_history
from .cli.completer import set_autocompleter
from .invocations import *
from .invocations.baseinvocation import BaseInvocation
from .services.events import EventServiceBase
from .services.model_manager_initializer import get_model_manager
from .services.restoration_services import RestorationServices
from .services.graph import Edge, EdgeConnection, GraphExecutionState
from .services.image_storage import DiskImageStorage
from .services.invocation_queue import MemoryInvocationQueue
from .services.invocation_services import InvocationServices
from .services.invoker import Invoker
from .services.processor import DefaultInvocationProcessor
from .services.sqlite import SqliteItemStorage


class CliCommand(BaseModel):
    command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type")  # type: ignore


class InvalidArgs(Exception):
    pass


def add_invocation_args(command_parser):
    # Add linking capability
    command_parser.add_argument(
        "--link",
        "-l",
        action="append",
        nargs=3,
        help="A link in the format 'dest_field source_node source_field'. source_node can be relative to history (e.g. -1)",
    )

    command_parser.add_argument(
        "--link_node",
        "-ln",
        action="append",
        help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)",
    )


def get_command_parser() -> argparse.ArgumentParser:
    # Create invocation parser
    parser = argparse.ArgumentParser()

    def exit(*args, **kwargs):
        raise InvalidArgs

    parser.exit = exit
    subparsers = parser.add_subparsers(dest="type")

    # Create subparsers for each invocation
    invocations = BaseInvocation.get_all_subclasses()
    add_parsers(subparsers, invocations, add_arguments=add_invocation_args)

    # Create subparsers for each command
    commands = BaseCommand.get_all_subclasses()
    add_parsers(subparsers, commands, exclude_fields=["type"])

    return parser


def generate_matching_edges(
    a: BaseInvocation, b: BaseInvocation
) -> list[Edge]:
    """Generates all possible edges between two invocations"""
    atype = type(a)
    btype = type(b)

    aoutputtype = atype.get_output_type()

    afields = get_type_hints(aoutputtype)
    bfields = get_type_hints(btype)

    matching_fields = set(afields.keys()).intersection(bfields.keys())

    # Remove invalid fields
    invalid_fields = set(["type", "id"])
    matching_fields = matching_fields.difference(invalid_fields)

    edges = [
        Edge(
            source=EdgeConnection(node_id=a.id, field=field),
            destination=EdgeConnection(node_id=b.id, field=field)
        )
        for field in matching_fields
    ]
    return edges


class SessionError(Exception):
    """Raised when a session error has occurred"""
    pass


def invoke_all(context: CliContext):
    """Runs all invocations in the specified session"""
    context.invoker.invoke(context.session, invoke_all=True)
    while not context.get_session().is_complete():
        # Wait some time
        time.sleep(0.1)

    # Print any errors
    if context.session.has_error():
        for n in context.session.errors:
            print(
                f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
            )

        raise SessionError()


def invoke_cli():
    config = Args()
    config.parse_args()
    model_manager = get_model_manager(config)

    # This initializes the autocompleter and returns it.
    # Currently nothing is done with the returned Completer
    # object, but the object can be used to change autocompletion
    # behavior on the fly, if desired.
    completer = set_autocompleter(model_manager)

    events = EventServiceBase()

    output_folder = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../../../outputs")
    )

    # TODO: build a file/path manager?
    db_location = os.path.join(output_folder, "invokeai.db")

    services = InvocationServices(
        model_manager=model_manager,
        events=events,
        images=DiskImageStorage(output_folder),
        queue=MemoryInvocationQueue(),
        graph_execution_manager=SqliteItemStorage[GraphExecutionState](
            filename=db_location, table_name="graph_executions"
        ),
        processor=DefaultInvocationProcessor(),
        restoration=RestorationServices(config),
    )

    invoker = Invoker(services)
    session: GraphExecutionState = invoker.create_execution_state()
    parser = get_command_parser()

    # Uncomment to print out previous sessions at startup
    # print(services.session_manager.list())

    context = CliContext(invoker, session, parser)

    while True:
        try:
            cmd_input = input("invoke> ")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-c exits
            break

        try:
            # Refresh the state of the session
            history = list(get_graph_execution_history(context.session))

            # Split the command for piping
            cmds = cmd_input.split("|")
            start_id = len(history)
            current_id = start_id
            new_invocations = list()
            for cmd in cmds:
                if cmd is None or cmd.strip() == "":
                    raise InvalidArgs("Empty command")

                # Parse args to create invocation
                args = vars(context.parser.parse_args(shlex.split(cmd.strip())))

                # Override defaults
                for field_name, field_default in context.defaults.items():
                    if field_name in args:
                        args[field_name] = field_default

                # Parse invocation
                args["id"] = current_id
                command = CliCommand(command=args)

                # Run any CLI commands immediately
                if isinstance(command.command, BaseCommand):
                    # Invoke all current nodes to preserve operation order
                    invoke_all(context)

                    # Run the command
                    command.command.run(context)
                    continue

                # Pipe previous command output (if there was a previous command)
                edges: list[Edge] = list()
                if len(history) > 0 or current_id != start_id:
                    from_id = (
                        history[0] if current_id == start_id else str(current_id - 1)
                    )
                    from_node = (
                        next(filter(lambda n: n[0].id == from_id, new_invocations))[0]
                        if current_id != start_id
                        else context.session.graph.get_node(from_id)
                    )
                    matching_edges = generate_matching_edges(
                        from_node, command.command
                    )
                    edges.extend(matching_edges)

                # Parse provided links
                if "link_node" in args and args["link_node"]:
                    for link in args["link_node"]:
                        link_node = context.session.graph.get_node(link)
                        matching_edges = generate_matching_edges(
                            link_node, command.command
                        )
                        matching_destinations = [e.destination for e in matching_edges]
                        edges = [e for e in edges if e.destination not in matching_destinations]
                        edges.extend(matching_edges)

                if "link" in args and args["link"]:
                    for link in args["link"]:
                        edges = [e for e in edges if e.destination.node_id != command.command.id and e.destination.field != link[2]]
                        edges.append(
                            Edge(
                                source=EdgeConnection(node_id=link[1], field=link[0]),
                                destination=EdgeConnection(
                                    node_id=command.command.id, field=link[2]
                                )
                            )
                        )

                new_invocations.append((command.command, edges))

                current_id = current_id + 1

                # Add the node to the session
                context.session.add_node(command.command)
                for edge in edges:
                    print(edge)
                    context.session.add_edge(edge)

            # Execute all remaining nodes
            invoke_all(context)

        except InvalidArgs:
            print('Invalid command, use "help" to list commands')
            continue

        except SessionError:
            # Start a new session
            print("Session error: creating a new session")
            context.session = context.invoker.create_execution_state()

        except ExitCli:
            break

        except SystemExit:
            continue

    invoker.stop()


if __name__ == "__main__":
    invoke_cli()
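An illustrative interactive session (output elided). The pipe syntax splits on "|" as in the loop above, and matching output/input fields are auto-linked between adjacent nodes; the upscale command assumes an upscale invocation defined elsewhere in this diff.

invoke> txt2img --prompt "a castle at dawn" --steps 20 | upscale --level 2
invoke> history --count 2
invoke> exit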
invokeai/app/invocations/__init__.py (new file, 12 lines) @@ -0,0 +1,12 @@
import os

__all__ = []

dirname = os.path.dirname(os.path.abspath(__file__))
for f in os.listdir(dirname):
    if (
        f != "__init__.py"
        and os.path.isfile("%s/%s" % (dirname, f))
        and f[-3:] == ".py"
    ):
        __all__.append(f[:-3])
invokeai/app/invocations/baseinvocation.py (new file, 78 lines) @@ -0,0 +1,78 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from abc import ABC, abstractmethod
from inspect import signature
from typing import get_args, get_type_hints

from pydantic import BaseModel, Field

from ..services.invocation_services import InvocationServices


class InvocationContext:
    services: InvocationServices
    graph_execution_state_id: str

    def __init__(self, services: InvocationServices, graph_execution_state_id: str):
        self.services = services
        self.graph_execution_state_id = graph_execution_state_id


class BaseInvocationOutput(BaseModel):
    """Base class for all invocation outputs"""

    # All outputs must include a type name like this:
    # type: Literal['your_output_name']

    @classmethod
    def get_all_subclasses_tuple(cls):
        subclasses = []
        toprocess = [cls]
        while len(toprocess) > 0:
            next = toprocess.pop(0)
            next_subclasses = next.__subclasses__()
            subclasses.extend(next_subclasses)
            toprocess.extend(next_subclasses)
        return tuple(subclasses)


class BaseInvocation(ABC, BaseModel):
    """A node to process inputs and produce outputs.
    May use dependency injection in __init__ to receive providers.
    """

    # All invocations must include a type name like this:
    # type: Literal['your_invocation_name']

    @classmethod
    def get_all_subclasses(cls):
        subclasses = []
        toprocess = [cls]
        while len(toprocess) > 0:
            next = toprocess.pop(0)
            next_subclasses = next.__subclasses__()
            subclasses.extend(next_subclasses)
            toprocess.extend(next_subclasses)
        return subclasses

    @classmethod
    def get_invocations(cls):
        return tuple(BaseInvocation.get_all_subclasses())

    @classmethod
    def get_invocations_map(cls):
        # Get the type strings out of the literals and into a dictionary
        return dict(map(lambda t: (get_args(get_type_hints(t)['type'])[0], t), BaseInvocation.get_all_subclasses()))

    @classmethod
    def get_output_type(cls):
        return signature(cls.invoke).return_annotation

    @abstractmethod
    def invoke(self, context: InvocationContext) -> BaseInvocationOutput:
        """Invoke with provided context and return outputs."""
        pass

    # fmt: off
    id: str = Field(description="The id of this node. Must be unique among all nodes.")
    # fmt: on
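A minimal sketch of a custom node built on these base classes, following the Literal-discriminator pattern the comments above describe. Nothing in this block is part of the PR; the names are hypothetical.

from typing import Literal

class UppercaseOutput(BaseInvocationOutput):
    """Example output carrying a string"""
    type: Literal['uppercase_output'] = 'uppercase_output'
    value: str = Field(default="")

class UppercaseInvocation(BaseInvocation):
    """Example node: upper-cases its input string"""
    type: Literal['uppercase'] = 'uppercase'
    text: str = Field(default="", description="The text to transform")

    def invoke(self, context: InvocationContext) -> UppercaseOutput:
        return UppercaseOutput(value=self.text.upper())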
invokeai/app/invocations/cv.py (new file, 50 lines) @@ -0,0 +1,50 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Literal

import cv2 as cv
import numpy
from PIL import Image, ImageOps
from pydantic import Field

from ..services.image_storage import ImageType
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput


class CvInpaintInvocation(BaseInvocation):
    """Simple inpaint using opencv."""
    # fmt: off
    type: Literal["cv_inpaint"] = "cv_inpaint"

    # Inputs
    image: ImageField = Field(default=None, description="The image to inpaint")
    mask: ImageField = Field(default=None, description="The mask to use when inpainting")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        mask = context.services.images.get(self.mask.image_type, self.mask.image_name)

        # Convert to cv image/mask
        # TODO: consider making these utility functions
        cv_image = cv.cvtColor(numpy.array(image.convert("RGB")), cv.COLOR_RGB2BGR)
        cv_mask = numpy.array(ImageOps.invert(mask))

        # Inpaint
        cv_inpainted = cv.inpaint(cv_image, cv_mask, 3, cv.INPAINT_TELEA)

        # Convert back to Pillow
        # TODO: consider making a utility function
        image_inpainted = Image.fromarray(cv.cvtColor(cv_inpainted, cv.COLOR_BGR2RGB))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, image_inpainted)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )
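The core primitive above, isolated for clarity: cv.inpaint takes a BGR image, an 8-bit single-channel mask (non-zero pixels mark the area to fill), an inpaint radius in pixels (3 here), and an algorithm flag. This standalone sketch mirrors the call with synthetic data.

import cv2 as cv
import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:32, 16:32] = 255  # region to reconstruct
result = cv.inpaint(img, mask, 3, cv.INPAINT_TELEA)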
invokeai/app/invocations/generate.py (new file, 242 lines) @@ -0,0 +1,242 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from functools import partial
from typing import Literal, Optional, Union

import numpy as np
from torch import Tensor

from pydantic import Field

from ..services.image_storage import ImageType
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
from ...backend.stable_diffusion import PipelineIntermediateState
from ..util.util import diffusers_step_callback_adapter, CanceledException

SAMPLER_NAME_VALUES = Literal[
    tuple(InvokeAIGenerator.schedulers())
]


# Text to image
class TextToImageInvocation(BaseInvocation):
    """Generates an image using text2img."""

    type: Literal["txt2img"] = "txt2img"

    # Inputs
    # TODO: consider making prompt optional to enable providing prompt through a link
    # fmt: off
    prompt: Optional[str] = Field(description="The prompt to generate an image from")
    seed: int = Field(default=-1, ge=-1, le=np.iinfo(np.uint32).max, description="The seed to use (-1 for a random seed)", )
    steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
    width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting image", )
    height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting image", )
    cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance scale; higher values keep the result closer to the prompt", )
    sampler_name: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The sampler to use")
    seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
    model: str = Field(default="", description="The model to use (currently ignored)")
    progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
    # fmt: on

    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
    ) -> None:
        if context.services.queue.is_canceled(context.graph_execution_state_id):
            raise CanceledException

        step = intermediate_state.step
        if intermediate_state.predicted_original is not None:
            # Some schedulers report not only the noisy latents at the current timestep,
            # but also their estimate so far of what the de-noised latents will be.
            sample = intermediate_state.predicted_original
        else:
            sample = intermediate_state.latents

        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # def step_callback(state: PipelineIntermediateState):
        #     if (context.services.queue.is_canceled(context.graph_execution_state_id)):
        #         raise CanceledException
        #     self.dispatch_progress(context, state.latents, state.step)

        # Handle invalid model parameter
        # TODO: figure out if this can be done via a validator that uses the model_cache
        # TODO: How to get the default model name now?
        # (right now uses whatever current model is set in model manager)
        model = context.services.model_manager.get_model()
        outputs = Txt2Img(model).generate(
            prompt=self.prompt,
            step_callback=partial(self.dispatch_progress, context),
            **self.dict(
                exclude={"prompt"}
            ),  # Shorthand for passing all of the parameters above manually
        )
        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generate_output = next(outputs)

        # Results are image and seed, unwrap for now and ignore the seed
        # TODO: pre-seed?
        # TODO: can this return multiple results? Should it?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, generate_output.image)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )


class ImageToImageInvocation(TextToImageInvocation):
    """Generates an image using img2img."""

    type: Literal["img2img"] = "img2img"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image")
    strength: float = Field(
        default=0.75, gt=0, le=1, description="The strength of the original image"
    )
    fit: bool = Field(
        default=True,
        description="Whether or not the result should be fit to the aspect ratio of the input image",
    )

    def dispatch_progress(
        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
    ) -> None:
        if context.services.queue.is_canceled(context.graph_execution_state_id):
            raise CanceledException

        step = intermediate_state.step
        if intermediate_state.predicted_original is not None:
            # Some schedulers report not only the noisy latents at the current timestep,
            # but also their estimate so far of what the de-noised latents will be.
            sample = intermediate_state.predicted_original
        else:
            sample = intermediate_state.latents

        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = (
            None
            if self.image is None
            else context.services.images.get(
                self.image.image_type, self.image.image_name
            )
        )
        mask = None

        # Handle invalid model parameter
        # TODO: figure out if this can be done via a validator that uses the model_cache
        # TODO: How to get the default model name now?
        model = context.services.model_manager.get_model()
        outputs = Img2Img(model).generate(
            prompt=self.prompt,
            init_image=image,
            init_mask=mask,
            step_callback=partial(self.dispatch_progress, context),
            **self.dict(
                exclude={"prompt", "image", "mask"}
            ),  # Shorthand for passing all of the parameters above manually
        )

        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generator_output = next(outputs)

        result_image = generator_output.image

        # Results are image and seed, unwrap for now and ignore the seed
        # TODO: pre-seed?
        # TODO: can this return multiple results? Should it?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, result_image)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )


class InpaintInvocation(ImageToImageInvocation):
    """Generates an image using inpaint."""

    type: Literal["inpaint"] = "inpaint"

    # Inputs
    mask: Union[ImageField, None] = Field(description="The mask")
    inpaint_replace: float = Field(
        default=0.0,
        ge=0.0,
        le=1.0,
        description="The amount by which to replace masked areas with latent noise",
    )

    def dispatch_progress(
        self, context: InvocationContext, intermediate_state: PipelineIntermediateState
    ) -> None:
        if context.services.queue.is_canceled(context.graph_execution_state_id):
            raise CanceledException

        step = intermediate_state.step
        if intermediate_state.predicted_original is not None:
            # Some schedulers report not only the noisy latents at the current timestep,
            # but also their estimate so far of what the de-noised latents will be.
            sample = intermediate_state.predicted_original
        else:
            sample = intermediate_state.latents

        diffusers_step_callback_adapter(sample, step, steps=self.steps, id=self.id, context=context)

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = (
            None
            if self.image is None
            else context.services.images.get(
                self.image.image_type, self.image.image_name
            )
        )
        mask = (
            None
            if self.mask is None
            else context.services.images.get(self.mask.image_type, self.mask.image_name)
        )

        # Handle invalid model parameter
        # TODO: figure out if this can be done via a validator that uses the model_cache
        # TODO: How to get the default model name now?
        model = context.services.model_manager.get_model()
        outputs = Inpaint(model).generate(
            prompt=self.prompt,
            init_img=image,
            init_mask=mask,
            step_callback=partial(self.dispatch_progress, context),
            **self.dict(
                exclude={"prompt", "image", "mask"}
            ),  # Shorthand for passing all of the parameters above manually
        )

        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generator_output = next(outputs)

        result_image = generator_output.image

        # Results are image and seed, unwrap for now and ignore the seed
        # TODO: pre-seed?
        # TODO: can this return multiple results? Should it?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, result_image)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )
303
invokeai/app/invocations/image.py
Normal file
@@ -0,0 +1,303 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from datetime import datetime, timezone
from typing import Literal, Optional

import numpy
from PIL import Image, ImageFilter, ImageOps
from pydantic import BaseModel, Field

from ..services.image_storage import ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext


class ImageField(BaseModel):
    """An image field used for passing image objects between invocations"""

    image_type: str = Field(
        default=ImageType.RESULT, description="The type of the image"
    )
    image_name: Optional[str] = Field(default=None, description="The name of the image")


class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""
    #fmt: off
    type: Literal["image"] = "image"
    image: ImageField = Field(default=None, description="The output image")
    #fmt: on

    class Config:
        schema_extra = {
            'required': [
                'type',
                'image',
            ]
        }

class MaskOutput(BaseInvocationOutput):
    """Base class for invocations that output a mask"""
    #fmt: off
    type: Literal["mask"] = "mask"
    mask: ImageField = Field(default=None, description="The output mask")
    #fmt: on

    class Config:
        schema_extra = {
            'required': [
                'type',
                'mask',
            ]
        }

# TODO: this isn't really necessary anymore
class LoadImageInvocation(BaseInvocation):
    """Load an image from a filename and provide it as output."""
    #fmt: off
    type: Literal["load_image"] = "load_image"

    # Inputs
    image_type: ImageType = Field(description="The type of the image")
    image_name: str = Field(description="The name of the image")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        return ImageOutput(
            image=ImageField(image_type=self.image_type, image_name=self.image_name)
        )


class ShowImageInvocation(BaseInvocation):
    """Displays a provided image, and passes it forward in the pipeline."""

    type: Literal["show_image"] = "show_image"

    # Inputs
    image: ImageField = Field(default=None, description="The image to show")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        if image:
            image.show()

        # TODO: how to handle failure?

        return ImageOutput(
            image=ImageField(
                image_type=self.image.image_type, image_name=self.image.image_name
            )
        )


class CropImageInvocation(BaseInvocation):
    """Crops an image to a specified box. The box can be outside of the image."""
    #fmt: off
    type: Literal["crop"] = "crop"

    # Inputs
    image: ImageField = Field(default=None, description="The image to crop")
    x: int = Field(default=0, description="The left x coordinate of the crop rectangle")
    y: int = Field(default=0, description="The top y coordinate of the crop rectangle")
    width: int = Field(default=512, gt=0, description="The width of the crop rectangle")
    height: int = Field(default=512, gt=0, description="The height of the crop rectangle")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_crop = Image.new(
            mode="RGBA", size=(self.width, self.height), color=(0, 0, 0, 0)
        )
        image_crop.paste(image, (-self.x, -self.y))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, image_crop)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )


class PasteImageInvocation(BaseInvocation):
    """Pastes an image into another image."""
    #fmt: off
    type: Literal["paste"] = "paste"

    # Inputs
    base_image: ImageField = Field(default=None, description="The base image")
    image: ImageField = Field(default=None, description="The image to paste")
    mask: Optional[ImageField] = Field(default=None, description="The mask to use when pasting")
    x: int = Field(default=0, description="The left x coordinate at which to paste the image")
    y: int = Field(default=0, description="The top y coordinate at which to paste the image")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        base_image = context.services.images.get(
            self.base_image.image_type, self.base_image.image_name
        )
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        mask = (
            None
            if self.mask is None
            else ImageOps.invert(
                context.services.images.get(self.mask.image_type, self.mask.image_name)
            )
        )
        # TODO: probably shouldn't invert mask here... should user be required to do it?

        min_x = min(0, self.x)
        min_y = min(0, self.y)
        max_x = max(base_image.width, image.width + self.x)
        max_y = max(base_image.height, image.height + self.y)

        new_image = Image.new(
            mode="RGBA", size=(max_x - min_x, max_y - min_y), color=(0, 0, 0, 0)
        )
        new_image.paste(base_image, (abs(min_x), abs(min_y)))
        new_image.paste(image, (max(0, self.x), max(0, self.y)), mask=mask)

        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, new_image)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )


class MaskFromAlphaInvocation(BaseInvocation):
    """Extracts the alpha channel of an image as a mask."""
    #fmt: off
    type: Literal["tomask"] = "tomask"

    # Inputs
    image: ImageField = Field(default=None, description="The image to create the mask from")
    invert: bool = Field(default=False, description="Whether or not to invert the mask")
    #fmt: on

    def invoke(self, context: InvocationContext) -> MaskOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_mask = image.split()[-1]
        if self.invert:
            image_mask = ImageOps.invert(image_mask)

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, image_mask)
        return MaskOutput(mask=ImageField(image_type=image_type, image_name=image_name))


class BlurInvocation(BaseInvocation):
    """Blurs an image"""

    #fmt: off
    type: Literal["blur"] = "blur"

    # Inputs
    image: ImageField = Field(default=None, description="The image to blur")
    radius: float = Field(default=8.0, ge=0, description="The blur radius")
    blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        blur = (
            ImageFilter.GaussianBlur(self.radius)
            if self.blur_type == "gaussian"
            else ImageFilter.BoxBlur(self.radius)
        )
        blur_image = image.filter(blur)

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, blur_image)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )


class LerpInvocation(BaseInvocation):
    """Linear interpolation of all pixels of an image"""
    #fmt: off
    type: Literal["lerp"] = "lerp"

    # Inputs
    image: ImageField = Field(default=None, description="The image to lerp")
    min: int = Field(default=0, ge=0, le=255, description="The minimum output value")
    max: int = Field(default=255, ge=0, le=255, description="The maximum output value")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_arr = numpy.asarray(image, dtype=numpy.float32) / 255
        image_arr = image_arr * (self.max - self.min) + self.min

        lerp_image = Image.fromarray(numpy.uint8(image_arr))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, lerp_image)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )


class InverseLerpInvocation(BaseInvocation):
    """Inverse linear interpolation of all pixels of an image"""
    #fmt: off
    type: Literal["ilerp"] = "ilerp"

    # Inputs
    image: ImageField = Field(default=None, description="The image to lerp")
    min: int = Field(default=0, ge=0, le=255, description="The minimum input value")
    max: int = Field(default=255, ge=0, le=255, description="The maximum input value")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_arr = numpy.asarray(image, dtype=numpy.float32)
        image_arr = (
            numpy.minimum(
                numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1
            )
            * 255
        )

        ilerp_image = Image.fromarray(numpy.uint8(image_arr))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, ilerp_image)
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )
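A quick standalone check of the lerp mapping used by LerpInvocation above (a sketch independent of the invocation plumbing; the sample values are illustrative): pixels are normalized to [0, 1] and then rescaled into [min, max], so 0 maps to min and 255 maps to max.

import numpy

arr = numpy.array([0, 128, 255], dtype=numpy.float32) / 255  # normalize to [0, 1]
lo, hi = 64, 192  # illustrative min/max inputs
out = arr * (hi - lo) + lo  # same mapping as LerpInvocation.invoke
print(out)  # [ 64.   128.25 192.  ] -- endpoints land exactly on min and max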
22
invokeai/app/invocations/prompt.py
Normal file
@@ -0,0 +1,22 @@
from typing import Literal

from pydantic.fields import Field

from .baseinvocation import BaseInvocationOutput


class PromptOutput(BaseInvocationOutput):
    """Base class for invocations that output a prompt"""
    #fmt: off
    type: Literal["prompt"] = "prompt"

    prompt: str = Field(default=None, description="The output prompt")
    #fmt: on

    class Config:
        schema_extra = {
            'required': [
                'type',
                'prompt',
            ]
        }
42
invokeai/app/invocations/reconstruct.py
Normal file
@@ -0,0 +1,42 @@
from datetime import datetime, timezone
from typing import Literal, Union

from pydantic import Field

from ..services.image_storage import ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput

class RestoreFaceInvocation(BaseInvocation):
    """Restores faces in an image."""
    #fmt: off
    type: Literal["restore_face"] = "restore_face"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image")
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the restoration")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        results = context.services.restoration.upscale_and_reconstruct(
            image_list=[[image, 0]],
            upscale=None,
            strength=self.strength,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, results[0][0])
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )
46
invokeai/app/invocations/upscale.py
Normal file
@@ -0,0 +1,46 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from datetime import datetime, timezone
from typing import Literal, Union

from pydantic import Field

from ..services.image_storage import ImageType
from ..services.invocation_services import InvocationServices
from .baseinvocation import BaseInvocation, InvocationContext
from .image import ImageField, ImageOutput


class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""
    #fmt: off
    type: Literal["upscale"] = "upscale"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
    level: Literal[2, 4] = Field(default=2, description="The upscale level")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        results = context.services.restoration.upscale_and_reconstruct(
            image_list=[[image, 0]],
            upscale=(self.level, self.strength),
            strength=0.0,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )
        context.services.images.save(image_type, image_name, results[0][0])
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )
88
invokeai/app/services/events.py
Normal file
@@ -0,0 +1,88 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Any, Dict, TypedDict

ProgressImage = TypedDict(
    "ProgressImage", {"dataURL": str, "width": int, "height": int}
)

class EventServiceBase:
    session_event: str = "session_event"

    """Basic event bus, to have an empty stand-in when not needed"""

    def dispatch(self, event_name: str, payload: Any) -> None:
        pass

    def __emit_session_event(self, event_name: str, payload: Dict) -> None:
        self.dispatch(
            event_name=EventServiceBase.session_event,
            payload=dict(event=event_name, data=payload),
        )

    # Define events here for every event in the system.
    # This will make them easier to integrate until we find a schema generator.
    def emit_generator_progress(
        self,
        graph_execution_state_id: str,
        invocation_id: str,
        progress_image: ProgressImage | None,
        step: int,
        total_steps: int,
    ) -> None:
        """Emitted when there is generation progress"""
        self.__emit_session_event(
            event_name="generator_progress",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                invocation_id=invocation_id,
                progress_image=progress_image,
                step=step,
                total_steps=total_steps,
            ),
        )

    def emit_invocation_complete(
        self, graph_execution_state_id: str, invocation_id: str, result: Dict
    ) -> None:
        """Emitted when an invocation has completed"""
        self.__emit_session_event(
            event_name="invocation_complete",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                invocation_id=invocation_id,
                result=result,
            ),
        )

    def emit_invocation_error(
        self, graph_execution_state_id: str, invocation_id: str, error: str
    ) -> None:
        """Emitted when an invocation has encountered an error"""
        self.__emit_session_event(
            event_name="invocation_error",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                invocation_id=invocation_id,
                error=error,
            ),
        )

    def emit_invocation_started(
        self, graph_execution_state_id: str, invocation_id: str
    ) -> None:
        """Emitted when an invocation has started"""
        self.__emit_session_event(
            event_name="invocation_started",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                invocation_id=invocation_id,
            ),
        )

    def emit_graph_execution_complete(self, graph_execution_state_id: str) -> None:
        """Emitted when a session has completed all invocations"""
        self.__emit_session_event(
            event_name="graph_execution_state_complete",
            payload=dict(graph_execution_state_id=graph_execution_state_id),
        )
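A minimal sketch of how a concrete event service could plug into the base class above (the subclass and its names are hypothetical): implementations only override dispatch(), and every emit_* helper arrives there wrapped in a session_event envelope.

# Hypothetical subclass for illustration: records dispatched events in memory.
class RecordingEventService(EventServiceBase):
    def __init__(self) -> None:
        self.events: list[dict] = []

    def dispatch(self, event_name: str, payload: Any) -> None:
        self.events.append({"event_name": event_name, "payload": payload})

events = RecordingEventService()
events.emit_graph_execution_complete("some-session-id")
# Every helper funnels through the session_event channel:
assert events.events[0]["event_name"] == "session_event"
assert events.events[0]["payload"]["event"] == "graph_execution_state_complete"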
1151
invokeai/app/services/graph.py
Normal file
113
invokeai/app/services/image_storage.py
Normal file
@@ -0,0 +1,113 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import datetime
import os
from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from queue import Queue
from typing import Dict

import PIL.Image
from PIL.Image import Image

from invokeai.backend.image_util import PngWriter


class ImageType(str, Enum):
    RESULT = "results"
    INTERMEDIATE = "intermediates"
    UPLOAD = "uploads"


class ImageStorageBase(ABC):
    """Responsible for storing and retrieving images."""

    @abstractmethod
    def get(self, image_type: ImageType, image_name: str) -> Image:
        pass

    # TODO: make this a bit more flexible for e.g. cloud storage
    @abstractmethod
    def get_path(self, image_type: ImageType, image_name: str) -> str:
        pass

    @abstractmethod
    def save(self, image_type: ImageType, image_name: str, image: Image) -> None:
        pass

    @abstractmethod
    def delete(self, image_type: ImageType, image_name: str) -> None:
        pass

    def create_name(self, context_id: str, node_id: str) -> str:
        return f"{context_id}_{node_id}_{str(int(datetime.datetime.now(datetime.timezone.utc).timestamp()))}.png"


class DiskImageStorage(ImageStorageBase):
    """Stores images on disk"""

    __output_folder: str
    __pngWriter: PngWriter
    __cache_ids: Queue  # TODO: this is an incredibly naive cache
    __cache: Dict[str, Image]
    __max_cache_size: int

    def __init__(self, output_folder: str):
        self.__output_folder = output_folder
        self.__pngWriter = PngWriter(output_folder)
        self.__cache = dict()
        self.__cache_ids = Queue()
        self.__max_cache_size = 10  # TODO: get this from config

        Path(output_folder).mkdir(parents=True, exist_ok=True)

        # TODO: don't hard-code. get/save/delete should maybe take subpath?
        for image_type in ImageType:
            Path(os.path.join(output_folder, image_type)).mkdir(
                parents=True, exist_ok=True
            )

    def get(self, image_type: ImageType, image_name: str) -> Image:
        image_path = self.get_path(image_type, image_name)
        cache_item = self.__get_cache(image_path)
        if cache_item:
            return cache_item

        image = PIL.Image.open(image_path)
        self.__set_cache(image_path, image)
        return image

    # TODO: make this a bit more flexible for e.g. cloud storage
    def get_path(self, image_type: ImageType, image_name: str) -> str:
        path = os.path.join(self.__output_folder, image_type, image_name)
        return path

    def save(self, image_type: ImageType, image_name: str, image: Image) -> None:
        image_subpath = os.path.join(image_type, image_name)
        self.__pngWriter.save_image_and_prompt_to_png(
            image, "", image_subpath, None
        )  # TODO: just pass full path to png writer

        image_path = self.get_path(image_type, image_name)
        self.__set_cache(image_path, image)

    def delete(self, image_type: ImageType, image_name: str) -> None:
        image_path = self.get_path(image_type, image_name)
        if os.path.exists(image_path):
            os.remove(image_path)

        if image_path in self.__cache:
            del self.__cache[image_path]

    def __get_cache(self, image_name: str) -> Image:
        return None if image_name not in self.__cache else self.__cache[image_name]

    def __set_cache(self, image_name: str, image: Image):
        if image_name not in self.__cache:
            self.__cache[image_name] = image
            self.__cache_ids.put(
                image_name
            )  # TODO: this should refresh position for LRU cache
            if len(self.__cache) > self.__max_cache_size:
                cache_id = self.__cache_ids.get()
                del self.__cache[cache_id]
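A short usage sketch of DiskImageStorage (folder name and ids are illustrative): names come from create_name(), and a save() immediately primes the naive cache that get() consults.

storage = DiskImageStorage("outputs")  # creates results/, intermediates/, uploads/
img = PIL.Image.new(mode="RGB", size=(64, 64), color=(255, 0, 0))
name = storage.create_name("session-1", "node-1")  # session-1_node-1_<timestamp>.png
storage.save(ImageType.RESULT, name, img)
assert storage.get(ImageType.RESULT, name).size == (64, 64)  # served from the cache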
81
invokeai/app/services/invocation_queue.py
Normal file
@@ -0,0 +1,81 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from abc import ABC, abstractmethod
from queue import Queue
import time


# TODO: make this serializable
class InvocationQueueItem:
    # session_id: str
    graph_execution_state_id: str
    invocation_id: str
    invoke_all: bool
    timestamp: float

    def __init__(
        self,
        # session_id: str,
        graph_execution_state_id: str,
        invocation_id: str,
        invoke_all: bool = False,
    ):
        # self.session_id = session_id
        self.graph_execution_state_id = graph_execution_state_id
        self.invocation_id = invocation_id
        self.invoke_all = invoke_all
        self.timestamp = time.time()


class InvocationQueueABC(ABC):
    """Abstract base class for all invocation queues"""

    @abstractmethod
    def get(self) -> InvocationQueueItem:
        pass

    @abstractmethod
    def put(self, item: InvocationQueueItem | None) -> None:
        pass

    @abstractmethod
    def cancel(self, graph_execution_state_id: str) -> None:
        pass

    @abstractmethod
    def is_canceled(self, graph_execution_state_id: str) -> bool:
        pass


class MemoryInvocationQueue(InvocationQueueABC):
    __queue: Queue
    __cancellations: dict[str, float]

    def __init__(self):
        self.__queue = Queue()
        self.__cancellations = dict()

    def get(self) -> InvocationQueueItem:
        item = self.__queue.get()

        while isinstance(item, InvocationQueueItem) \
            and item.graph_execution_state_id in self.__cancellations \
            and self.__cancellations[item.graph_execution_state_id] > item.timestamp:
            item = self.__queue.get()

        # Clear old items
        for graph_execution_state_id in list(self.__cancellations.keys()):
            if self.__cancellations[graph_execution_state_id] < item.timestamp:
                del self.__cancellations[graph_execution_state_id]

        return item

    def put(self, item: InvocationQueueItem | None) -> None:
        self.__queue.put(item)

    def cancel(self, graph_execution_state_id: str) -> None:
        if graph_execution_state_id not in self.__cancellations:
            self.__cancellations[graph_execution_state_id] = time.time()

    def is_canceled(self, graph_execution_state_id: str) -> bool:
        return graph_execution_state_id in self.__cancellations
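The cancellation check in get() is timestamp-based: items enqueued before cancel() was called are skipped, items enqueued afterwards still run. A sketch of that behavior (ids are illustrative; the sleeps just guarantee distinct timestamps):

queue = MemoryInvocationQueue()
queue.put(InvocationQueueItem(graph_execution_state_id="g1", invocation_id="a"))
time.sleep(0.01)
queue.cancel("g1")  # recorded with the current time
time.sleep(0.01)
queue.put(InvocationQueueItem(graph_execution_state_id="g1", invocation_id="b"))
item = queue.get()  # "a" predates the cancellation and is skipped
assert item.invocation_id == "b"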
39
invokeai/app/services/invocation_services.py
Normal file
@@ -0,0 +1,39 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from invokeai.backend import ModelManager

from .events import EventServiceBase
from .image_storage import ImageStorageBase
from .restoration_services import RestorationServices
from .invocation_queue import InvocationQueueABC
from .item_storage import ItemStorageABC

class InvocationServices:
    """Services that can be used by invocations"""

    events: EventServiceBase
    images: ImageStorageBase
    queue: InvocationQueueABC
    model_manager: ModelManager
    restoration: RestorationServices

    # NOTE: we must forward-declare any types that include invocations, since invocations can use services
    graph_execution_manager: ItemStorageABC["GraphExecutionState"]
    processor: "InvocationProcessorABC"

    def __init__(
        self,
        model_manager: ModelManager,
        events: EventServiceBase,
        images: ImageStorageBase,
        queue: InvocationQueueABC,
        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
        processor: "InvocationProcessorABC",
        restoration: RestorationServices,
    ):
        self.model_manager = model_manager
        self.events = events
        self.images = images
        self.queue = queue
        self.graph_execution_manager = graph_execution_manager
        self.processor = processor
        self.restoration = restoration
91
invokeai/app/services/invoker.py
Normal file
@@ -0,0 +1,91 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from abc import ABC
from threading import Event, Thread

from ..invocations.baseinvocation import InvocationContext
from .graph import Graph, GraphExecutionState
from .invocation_queue import InvocationQueueABC, InvocationQueueItem
from .invocation_services import InvocationServices
from .item_storage import ItemStorageABC


class Invoker:
    """The invoker, used to execute invocations"""

    services: InvocationServices

    def __init__(self, services: InvocationServices):
        self.services = services
        self._start()

    def invoke(
        self, graph_execution_state: GraphExecutionState, invoke_all: bool = False
    ) -> str | None:
        """Determines the next node to invoke and returns the id of the invoked node, or None if there are no nodes to execute"""

        # Get the next invocation
        invocation = graph_execution_state.next()
        if not invocation:
            return None

        # Save the execution state
        self.services.graph_execution_manager.set(graph_execution_state)

        # Queue the invocation
        print(f"queueing item {invocation.id}")
        self.services.queue.put(
            InvocationQueueItem(
                # session_id = session.id,
                graph_execution_state_id=graph_execution_state.id,
                invocation_id=invocation.id,
                invoke_all=invoke_all,
            )
        )

        return invocation.id

    def create_execution_state(self, graph: Graph | None = None) -> GraphExecutionState:
        """Creates a new execution state for the given graph"""
        new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
        self.services.graph_execution_manager.set(new_state)
        return new_state

    def cancel(self, graph_execution_state_id: str) -> None:
        """Cancels the given execution state"""
        self.services.queue.cancel(graph_execution_state_id)

    def __start_service(self, service) -> None:
        # Call start() method on any services that have it
        start_op = getattr(service, "start", None)
        if callable(start_op):
            start_op(self)

    def __stop_service(self, service) -> None:
        # Call stop() method on any services that have it
        stop_op = getattr(service, "stop", None)
        if callable(stop_op):
            stop_op(self)

    def _start(self) -> None:
        """Starts the invoker. This is called automatically when the invoker is created."""
        for service in vars(self.services):
            self.__start_service(getattr(self.services, service))

    def stop(self) -> None:
        """Stops the invoker. A new invoker will have to be created to execute further."""
        # First stop all services
        for service in vars(self.services):
            self.__stop_service(getattr(self.services, service))

        self.services.queue.put(None)


class InvocationProcessorABC(ABC):
    pass
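Note the duck-typed lifecycle in _start()/stop(): any attribute of InvocationServices that exposes a callable start() or stop() is included, with no registration step. A hypothetical conforming service, for illustration:

class LoggingService:
    """Illustrative only: participates in the Invoker lifecycle by convention."""

    def start(self, invoker: Invoker) -> None:
        print("service started")

    def stop(self, invoker: Invoker) -> None:
        print("service stopped")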
62
invokeai/app/services/item_storage.py
Normal file
@@ -0,0 +1,62 @@
from abc import ABC, abstractmethod
from typing import Callable, Generic, TypeVar

from pydantic import BaseModel, Field
from pydantic.generics import GenericModel

T = TypeVar("T", bound=BaseModel)


class PaginatedResults(GenericModel, Generic[T]):
    """Paginated results"""
    #fmt: off
    items: list[T] = Field(description="Items")
    page: int = Field(description="Current Page")
    pages: int = Field(description="Total number of pages")
    per_page: int = Field(description="Number of items per page")
    total: int = Field(description="Total number of items in result")
    #fmt: on

class ItemStorageABC(ABC, Generic[T]):
    """Base item storage class"""

    _on_changed_callbacks: list[Callable[[T], None]]
    _on_deleted_callbacks: list[Callable[[str], None]]

    def __init__(self) -> None:
        self._on_changed_callbacks = list()
        self._on_deleted_callbacks = list()

    @abstractmethod
    def get(self, item_id: str) -> T:
        pass

    @abstractmethod
    def set(self, item: T) -> None:
        pass

    @abstractmethod
    def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
        pass

    @abstractmethod
    def search(
        self, query: str, page: int = 0, per_page: int = 10
    ) -> PaginatedResults[T]:
        pass

    def on_changed(self, on_changed: Callable[[T], None]) -> None:
        """Register a callback for when an item is changed"""
        self._on_changed_callbacks.append(on_changed)

    def on_deleted(self, on_deleted: Callable[[str], None]) -> None:
        """Register a callback for when an item is deleted"""
        self._on_deleted_callbacks.append(on_deleted)

    def _on_changed(self, item: T) -> None:
        for callback in self._on_changed_callbacks:
            callback(item)

    def _on_deleted(self, item_id: str) -> None:
        for callback in self._on_deleted_callbacks:
            callback(item_id)
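A sketch of a minimal in-memory implementation of the contract above (hypothetical, for illustration; the sqlite-backed version later in this diff is the real one). The key obligation is calling _on_changed()/_on_deleted() after every mutation so registered observers stay in sync.

class MemoryItemStorage(ItemStorageABC[T]):
    def __init__(self) -> None:
        super().__init__()
        self._items: dict[str, T] = {}

    def get(self, item_id: str) -> T:
        return self._items[item_id]

    def set(self, item: T) -> None:
        self._items[item.id] = item  # assumes T has an 'id' field, as in this codebase
        self._on_changed(item)  # notify observers after every write

    def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
        items = list(self._items.values())[page * per_page : (page + 1) * per_page]
        total = len(self._items)
        pages = max(1, -(-total // per_page))  # ceiling division
        return PaginatedResults[T](
            items=items, page=page, pages=pages, per_page=per_page, total=total
        )

    def search(self, query: str, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
        return self.list(page, per_page)  # naive: no filtering in this sketch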
120
invokeai/app/services/model_manager_initializer.py
Normal file
@@ -0,0 +1,120 @@
import os
import sys
import torch
from argparse import Namespace
from invokeai.backend import Args
from omegaconf import OmegaConf
from pathlib import Path

import invokeai.version
from ...backend import ModelManager
from ...backend.util import choose_precision, choose_torch_device
from ...backend import Globals

# TODO: Replace with an abstract class base ModelManagerBase
def get_model_manager(config: Args) -> ModelManager:
    if not config.conf:
        config_file = os.path.join(Globals.root, "configs", "models.yaml")
        if not os.path.exists(config_file):
            report_model_error(
                config, FileNotFoundError(f"The file {config_file} could not be found.")
            )

    print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
    print(f'>> InvokeAI runtime directory is "{Globals.root}"')

    # these two lines prevent a horrible warning message from appearing
    # when the frozen CLIP tokenizer is imported
    import transformers  # type: ignore

    transformers.logging.set_verbosity_error()
    import diffusers

    diffusers.logging.set_verbosity_error()

    # normalize the config directory relative to root
    if not os.path.isabs(config.conf):
        config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))

    if config.embeddings:
        if not os.path.isabs(config.embedding_path):
            embedding_path = os.path.normpath(
                os.path.join(Globals.root, config.embedding_path)
            )
        else:
            embedding_path = config.embedding_path
    else:
        embedding_path = None

    # migrate legacy models
    ModelManager.migrate_models()

    # creating the model manager
    try:
        device = torch.device(choose_torch_device())
        precision = 'float16' if config.precision == 'float16' \
            else 'float32' if config.precision == 'float32' \
            else choose_precision(device)

        model_manager = ModelManager(
            OmegaConf.load(config.conf),
            precision=precision,
            device_type=device,
            max_loaded_models=config.max_loaded_models,
            embedding_path=Path(embedding_path) if embedding_path else None,
        )
    except (FileNotFoundError, TypeError, AssertionError) as e:
        report_model_error(config, e)
    except (IOError, KeyError) as e:
        print(f"{e}. Aborting.")
        sys.exit(-1)

    # try to autoconvert new models
    # autoimport new .ckpt files
    if path := config.autoconvert:
        model_manager.autoconvert_weights(
            conf_path=config.conf,
            weights_directory=path,
        )

    return model_manager

def report_model_error(opt: Namespace, e: Exception):
    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
    print(
        "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
    )
    yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
    if yes_to_all:
        print(
            "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
        )
    else:
        response = input(
            "Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
        )
        if response.startswith(("n", "N")):
            return

    print("invokeai-configure is launching....\n")

    # Match arguments that were set on the CLI
    # only the arguments accepted by the configuration script are parsed
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    previous_args = sys.argv
    sys.argv = ["invokeai-configure"]
    sys.argv.extend(root_dir)
    sys.argv.extend(config)
    if yes_to_all is not None:
        for arg in yes_to_all.split():
            sys.argv.append(arg)

    from invokeai.frontend.install import invokeai_configure

    invokeai_configure()
    # TODO: Figure out how to restart
    # print('** InvokeAI will now restart')
    # sys.argv = previous_args
    # main()  # would rather do a os.exec(), but doesn't exist?
    # sys.exit(0)
124
invokeai/app/services/processor.py
Normal file
@@ -0,0 +1,124 @@
import traceback
from threading import Event, Thread

from ..invocations.baseinvocation import InvocationContext
from .invocation_queue import InvocationQueueItem
from .invoker import InvocationProcessorABC, Invoker
from ..util.util import CanceledException

class DefaultInvocationProcessor(InvocationProcessorABC):
    __invoker_thread: Thread
    __stop_event: Event
    __invoker: Invoker

    def start(self, invoker) -> None:
        self.__invoker = invoker
        self.__stop_event = Event()
        self.__invoker_thread = Thread(
            name="invoker_processor",
            target=self.__process,
            kwargs=dict(stop_event=self.__stop_event),
        )
        self.__invoker_thread.daemon = (
            True  # TODO: probably better to just not use threads?
        )
        self.__invoker_thread.start()

    def stop(self, *args, **kwargs) -> None:
        self.__stop_event.set()

    def __process(self, stop_event: Event):
        try:
            while not stop_event.is_set():
                queue_item: InvocationQueueItem = self.__invoker.services.queue.get()
                if not queue_item:  # Probably stopping
                    continue

                graph_execution_state = (
                    self.__invoker.services.graph_execution_manager.get(
                        queue_item.graph_execution_state_id
                    )
                )
                invocation = graph_execution_state.execution_graph.get_node(
                    queue_item.invocation_id
                )

                # Send starting event
                self.__invoker.services.events.emit_invocation_started(
                    graph_execution_state_id=graph_execution_state.id,
                    invocation_id=invocation.id,
                )

                # Invoke
                try:
                    outputs = invocation.invoke(
                        InvocationContext(
                            services=self.__invoker.services,
                            graph_execution_state_id=graph_execution_state.id,
                        )
                    )

                    # Check queue to see if this is canceled, and skip if so
                    if self.__invoker.services.queue.is_canceled(
                        graph_execution_state.id
                    ):
                        continue

                    # Save outputs and history
                    graph_execution_state.complete(invocation.id, outputs)

                    # Save the state changes
                    self.__invoker.services.graph_execution_manager.set(
                        graph_execution_state
                    )

                    # Send complete event
                    self.__invoker.services.events.emit_invocation_complete(
                        graph_execution_state_id=graph_execution_state.id,
                        invocation_id=invocation.id,
                        result=outputs.dict(),
                    )

                except KeyboardInterrupt:
                    pass

                except CanceledException:
                    pass

                except Exception as e:
                    error = traceback.format_exc()

                    # Save error
                    graph_execution_state.set_node_error(invocation.id, error)

                    # Save the state changes
                    self.__invoker.services.graph_execution_manager.set(
                        graph_execution_state
                    )

                    # Send error event
                    self.__invoker.services.events.emit_invocation_error(
                        graph_execution_state_id=graph_execution_state.id,
                        invocation_id=invocation.id,
                        error=error,
                    )

                # Check queue to see if this is canceled, and skip if so
                if self.__invoker.services.queue.is_canceled(
                    graph_execution_state.id
                ):
                    continue

                # Queue any further commands if invoking all
                is_complete = graph_execution_state.is_complete()
                if queue_item.invoke_all and not is_complete:
                    self.__invoker.invoke(graph_execution_state, invoke_all=True)
                elif is_complete:
                    self.__invoker.services.events.emit_graph_execution_complete(
                        graph_execution_state.id
                    )

        except KeyboardInterrupt:
            ...  # Log something?
109
invokeai/app/services/restoration_services.py
Normal file
@@ -0,0 +1,109 @@
import sys
import traceback
import torch
from ...backend.restoration import Restoration
from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE

# This should be a real base class for postprocessing functions,
# but right now we just instantiate the existing gfpgan, esrgan
# and codeformer functions.
class RestorationServices:
    '''Face restoration and upscaling'''

    def __init__(self, args):
        try:
            gfpgan, codeformer, esrgan = None, None, None
            if args.restore or args.esrgan:
                restoration = Restoration()
                if args.restore:
                    gfpgan, codeformer = restoration.load_face_restore_models(
                        args.gfpgan_model_path
                    )
                else:
                    print(">> Face restoration disabled")
                if args.esrgan:
                    esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
                else:
                    print(">> Upscaling disabled")
            else:
                print(">> Face restoration and upscaling disabled")
        except (ModuleNotFoundError, ImportError):
            print(traceback.format_exc(), file=sys.stderr)
            print(">> You may need to install the ESRGAN and/or GFPGAN modules")
        self.device = torch.device(choose_torch_device())
        self.gfpgan = gfpgan
        self.codeformer = codeformer
        self.esrgan = esrgan

    # note that this one method does gfpgan and codeformer reconstruction, as well as
    # esrgan upscaling
    # TO DO: refactor into separate methods
    def upscale_and_reconstruct(
        self,
        image_list,
        facetool="gfpgan",
        upscale=None,
        upscale_denoise_str=0.75,
        strength=0.0,
        codeformer_fidelity=0.75,
        save_original=False,
        image_callback=None,
        prefix=None,
    ):
        results = []
        for r in image_list:
            image, seed = r
            try:
                if strength > 0:
                    if self.gfpgan is not None or self.codeformer is not None:
                        if facetool == "gfpgan":
                            if self.gfpgan is None:
                                print(
                                    ">> GFPGAN not found. Face restoration is disabled."
                                )
                            else:
                                image = self.gfpgan.process(image, strength, seed)
                        if facetool == "codeformer":
                            if self.codeformer is None:
                                print(
                                    ">> CodeFormer not found. Face restoration is disabled."
                                )
                            else:
                                cf_device = (
                                    CPU_DEVICE if self.device == MPS_DEVICE else self.device
                                )
                                image = self.codeformer.process(
                                    image=image,
                                    strength=strength,
                                    device=cf_device,
                                    seed=seed,
                                    fidelity=codeformer_fidelity,
                                )
                    else:
                        print(">> Face Restoration is disabled.")
                if upscale is not None:
                    if self.esrgan is not None:
                        if len(upscale) < 2:
                            upscale.append(0.75)
                        image = self.esrgan.process(
                            image,
                            upscale[1],
                            seed,
                            int(upscale[0]),
                            denoise_str=upscale_denoise_str,
                        )
                    else:
                        print(">> ESRGAN is disabled. Image not upscaled.")
            except Exception as e:
                print(
                    f">> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
                )

            if image_callback is not None:
                image_callback(image, seed, upscaled=True, use_prefix=prefix)
            else:
                r[0] = image

            results.append([image, seed])

        return results
138
invokeai/app/services/sqlite.py
Normal file
@@ -0,0 +1,138 @@
import sqlite3
from threading import Lock
from typing import Generic, TypeVar, Union, get_args

from pydantic import BaseModel, parse_raw_as

from .item_storage import ItemStorageABC, PaginatedResults

T = TypeVar("T", bound=BaseModel)

sqlite_memory = ":memory:"


class SqliteItemStorage(ItemStorageABC, Generic[T]):
    _filename: str
    _table_name: str
    _conn: sqlite3.Connection
    _cursor: sqlite3.Cursor
    _id_field: str
    _lock: Lock

    def __init__(self, filename: str, table_name: str, id_field: str = "id"):
        super().__init__()

        self._filename = filename
        self._table_name = table_name
        self._id_field = id_field  # TODO: validate that T has this field
        self._lock = Lock()

        self._conn = sqlite3.connect(
            self._filename, check_same_thread=False
        )  # TODO: figure out a better threading solution
        self._cursor = self._conn.cursor()

        self._create_table()

    def _create_table(self):
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""CREATE TABLE IF NOT EXISTS {self._table_name} (
                item TEXT,
                id TEXT GENERATED ALWAYS AS (json_extract(item, '$.{self._id_field}')) VIRTUAL NOT NULL);"""
            )
            self._cursor.execute(
                f"""CREATE UNIQUE INDEX IF NOT EXISTS {self._table_name}_id ON {self._table_name}(id);"""
            )
        finally:
            self._lock.release()

    def _parse_item(self, item: str) -> T:
        item_type = get_args(self.__orig_class__)[0]
        return parse_raw_as(item_type, item)

    def set(self, item: T):
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""INSERT OR REPLACE INTO {self._table_name} (item) VALUES (?);""",
                (item.json(),),
            )
        finally:
            self._lock.release()
        self._on_changed(item)

    def get(self, id: str) -> Union[T, None]:
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""SELECT item FROM {self._table_name} WHERE id = ?;""", (str(id),)
            )
            result = self._cursor.fetchone()
        finally:
            self._lock.release()

        if not result:
            return None

        return self._parse_item(result[0])

    def delete(self, id: str):
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""DELETE FROM {self._table_name} WHERE id = ?;""", (str(id),)
            )
        finally:
            self._lock.release()
        self._on_deleted(id)

    def list(self, page: int = 0, per_page: int = 10) -> PaginatedResults[T]:
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""SELECT item FROM {self._table_name} LIMIT ? OFFSET ?;""",
                (per_page, page * per_page),
            )
            result = self._cursor.fetchall()

            items = list(map(lambda r: self._parse_item(r[0]), result))

            self._cursor.execute(f"""SELECT count(*) FROM {self._table_name};""")
            count = self._cursor.fetchone()[0]
        finally:
            self._lock.release()

        pageCount = int(count / per_page) + 1

        return PaginatedResults[T](
            items=items, page=page, pages=pageCount, per_page=per_page, total=count
        )

    def search(
        self, query: str, page: int = 0, per_page: int = 10
    ) -> PaginatedResults[T]:
        try:
            self._lock.acquire()
            self._cursor.execute(
                f"""SELECT item FROM {self._table_name} WHERE item LIKE ? LIMIT ? OFFSET ?;""",
                (f"%{query}%", per_page, page * per_page),
            )
            result = self._cursor.fetchall()

            items = list(map(lambda r: self._parse_item(r[0]), result))

            self._cursor.execute(
                f"""SELECT count(*) FROM {self._table_name} WHERE item LIKE ?;""",
                (f"%{query}%",),
            )
            count = self._cursor.fetchone()[0]
        finally:
            self._lock.release()

        pageCount = int(count / per_page) + 1

        return PaginatedResults[T](
            items=items, page=page, pages=pageCount, per_page=per_page, total=count
        )
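A usage sketch of the sqlite-backed storage (the model and values are illustrative): the generic parameter drives _parse_item(), so the class must be parametrized at construction time, and observers registered via on_changed() fire after each set().

class Widget(BaseModel):
    id: str
    name: str = ""

widgets = SqliteItemStorage[Widget](filename=sqlite_memory, table_name="widgets")
widgets.on_changed(lambda item: print(f"changed: {item.id}"))

widgets.set(Widget(id="w1", name="first"))  # prints "changed: w1"
assert widgets.get("w1").name == "first"
assert widgets.list().total == 1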
42
invokeai/app/util/util.py
Normal file
@ -0,0 +1,42 @@
import torch
from PIL import Image

from ..invocations.baseinvocation import InvocationContext
from ...backend.util.util import image_to_dataURL
from ...backend.generator.base import Generator
from ...backend.stable_diffusion import PipelineIntermediateState


class CanceledException(Exception):
    pass


def fast_latents_step_callback(
    sample: torch.Tensor, step: int, steps: int, id: str, context: InvocationContext
):
    # TODO: only output a preview image when requested
    image = Generator.sample_to_lowres_estimated_image(sample)

    (width, height) = image.size
    width *= 8
    height *= 8

    dataURL = image_to_dataURL(image, image_format="JPEG")

    context.services.events.emit_generator_progress(
        context.graph_execution_state_id,
        id,
        {"width": width, "height": height, "dataURL": dataURL},
        step,
        steps,
    )


def diffusers_step_callback_adapter(*cb_args, **kwargs):
    """
    txt2img gives us a Tensor in the step_callback, while img2img gives us a PipelineIntermediateState.
    This adapter grabs the needed data and passes it along to the callback function.
    """
    if isinstance(cb_args[0], PipelineIntermediateState):
        progress_state: PipelineIntermediateState = cb_args[0]
        return fast_latents_step_callback(
            progress_state.latents, progress_state.step, **kwargs
        )
    else:
        return fast_latents_step_callback(*cb_args, **kwargs)
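
As a usage illustration (assumed wiring, not part of this diff): the adapter is meant to be handed to the pipeline as its per-step callback, with the invocation-specific keyword arguments pre-bound, e.g. via `functools.partial`:

from functools import partial

# hypothetical call site inside an invocation's invoke() method;
# `context`, `node_id`, and `steps` are assumed to be in scope there
step_callback = partial(
    diffusers_step_callback_adapter,
    steps=steps,
    id=node_id,
    context=context,
)
# the pipeline then calls step_callback(tensor_or_state, step) once per step
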
invokeai/backend/__init__.py
@@ -1,5 +1,16 @@
-'''
+"""
 Initialization file for invokeai.backend
-'''
+"""
-from .invoke_ai_web_server import InvokeAIWebServer
+from .generate import Generate
+from .generator import (
+    InvokeAIGeneratorBasicParams,
+    InvokeAIGenerator,
+    InvokeAIGeneratorOutput,
+    Txt2Img,
+    Img2Img,
+    Inpaint
+)
+from .model_management import ModelManager
+from .safety_checker import SafetyChecker
+from .args import Args
+from .globals import Globals
invokeai/backend/args.py (new file, 1387 lines; not expanded in this view)

invokeai/backend/config/invokeai_configure.py (new executable file, 867 lines)
@@ -0,0 +1,867 @@
#!/usr/bin/env python
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
# Before running stable-diffusion on an internet-isolated machine,
# run this script from one with internet connectivity. The
# two machines must share a common .cache directory.
#
# Coauthor: Kevin Turner http://github.com/keturn
#
import sys

print("Loading Python libraries...\n", file=sys.stderr)

import argparse
import io
import os
import re
import shutil
import traceback
import warnings
from argparse import Namespace
from pathlib import Path
from shutil import get_terminal_size
from urllib import request

import npyscreen
import torch
import transformers
from diffusers import AutoencoderKL
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
from tqdm import tqdm
from transformers import (
    AutoProcessor,
    CLIPSegForImageSegmentation,
    CLIPTextModel,
    CLIPTokenizer,
)

import invokeai.configs as configs

from ...frontend.install.model_install import addModelsForm, process_and_execute
from ...frontend.install.widgets import (
    CenteredButtonPress,
    IntTitleSlider,
    set_min_terminal_size,
)
from ..args import PRECISION_CHOICES, Args
from ..globals import Globals, global_cache_dir, global_config_dir, global_config_file
from .model_install_backend import (
    default_dataset,
    download_from_hf,
    hf_download_with_resume,
    recommended_datasets,
)

warnings.filterwarnings("ignore")

transformers.logging.set_verbosity_error()

# --------------------------globals-----------------------
Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"

# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"

Default_config_file = Path(global_config_dir()) / "models.yaml"
SD_Configs = Path(global_config_dir()) / "stable-diffusion"

Datasets = OmegaConf.load(Dataset_path)

# minimum size for the UI
MIN_COLS = 135
MIN_LINES = 45

INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
# or renaming it and then running invokeai-configure again.
# Place frequently-used startup commands here, one or more per line.
# Examples:
# --outdir=D:\data\images
# --no-nsfw_checker
# --web --host=0.0.0.0
# --steps=20
# -Ak_euler_a -C10.0
"""

# --------------------------------------------
def postscript(errors: set):
    if not any(errors):
        message = f"""
** INVOKEAI INSTALLATION SUCCESSFUL **
If you installed manually from source or with 'pip install': activate the virtual environment
then run one of the following commands to start InvokeAI.

Web UI:
   invokeai --web # (connect to http://localhost:9090)
   invokeai --web --host 0.0.0.0 # (connect to http://your-lan-ip:9090 from another computer on the local network)

Command-line interface:
   invokeai

If you installed using an installation script, run:
  {Globals.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}

Add the '--help' argument to see all of the command-line switches available for use.
"""

    else:
        message = "\n** There were errors during installation. It is possible some of the models were not fully downloaded.\n"
        for err in errors:
            message += f"\t - {err}\n"
        message += "Please check the logs above and correct any issues."

    print(message)


# ---------------------------------------------
def yes_or_no(prompt: str, default_yes=True):
    default = "y" if default_yes else "n"
    response = input(f"{prompt} [{default}] ") or default
    if default_yes:
        return response[0] not in ("n", "N")
    else:
        return response[0] in ("y", "Y")


# ---------------------------------------------
def HfLogin(access_token) -> None:
    """
    Helper for logging in to Huggingface
    The stdout capture is needed to hide the irrelevant "git credential helper" warning
    """

    capture = io.StringIO()
    sys.stdout = capture
    try:
        hf_hub_login(token=access_token, add_to_git_credential=False)
        sys.stdout = sys.__stdout__
    except Exception as exc:
        sys.stdout = sys.__stdout__
        print(exc)
        raise exc
# -------------------------------------
class ProgressBar:
    def __init__(self, model_name="file"):
        self.pbar = None
        self.name = model_name

    def __call__(self, block_num, block_size, total_size):
        if not self.pbar:
            self.pbar = tqdm(
                desc=self.name,
                initial=0,
                unit="iB",
                unit_scale=True,
                unit_divisor=1000,
                total=total_size,
            )
        self.pbar.update(block_size)


# ---------------------------------------------
def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"):
    try:
        print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
        if not os.path.exists(model_dest):
            os.makedirs(os.path.dirname(model_dest), exist_ok=True)
            request.urlretrieve(
                model_url, model_dest, ProgressBar(os.path.basename(model_dest))
            )
            print("...downloaded successfully", file=sys.stderr)
        else:
            print("...exists", file=sys.stderr)
    except Exception:
        print("...download failed", file=sys.stderr)
        print(f"Error downloading {label} model", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)

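
`ProgressBar` follows the reporthook protocol of `urllib.request.urlretrieve`: the hook is invoked as `hook(block_num, block_size, total_size)` after each chunk. A small sketch that drives the class the same way, with no network involved (values are made up):

pbar = ProgressBar("example.ckpt")
total_size, block_size = 10_000, 1_000
for block_num in range(10):
    pbar(block_num, block_size, total_size)  # what urlretrieve does per chunk
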
# ---------------------------------------------
# this will preload the Bert tokenizer files
def download_bert():
    print("Installing bert tokenizer...", file=sys.stderr)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        from transformers import BertTokenizerFast

        download_from_hf(BertTokenizerFast, "bert-base-uncased")


# ---------------------------------------------
def download_sd1_clip():
    print("Installing SD1 clip model...", file=sys.stderr)
    version = "openai/clip-vit-large-patch14"
    download_from_hf(CLIPTokenizer, version)
    download_from_hf(CLIPTextModel, version)


# ---------------------------------------------
def download_sd2_clip():
    version = "stabilityai/stable-diffusion-2"
    print("Installing SD2 clip model...", file=sys.stderr)
    download_from_hf(CLIPTokenizer, version, subfolder="tokenizer")
    download_from_hf(CLIPTextModel, version, subfolder="text_encoder")


# ---------------------------------------------
def download_realesrgan():
    print("Installing models from RealESRGAN...", file=sys.stderr)
    model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth"
    wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"

    model_dest = os.path.join(
        Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
    )

    wdn_model_dest = os.path.join(
        Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
    )

    download_with_progress_bar(model_url, model_dest, "RealESRGAN")
    download_with_progress_bar(wdn_model_url, wdn_model_dest, "RealESRGANwdn")


def download_gfpgan():
    print("Installing GFPGAN models...", file=sys.stderr)
    for model in (
        [
            "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth",
            "./models/gfpgan/GFPGANv1.4.pth",
        ],
        [
            "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth",
            "./models/gfpgan/weights/detection_Resnet50_Final.pth",
        ],
        [
            "https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth",
            "./models/gfpgan/weights/parsing_parsenet.pth",
        ],
    ):
        model_url, model_dest = model[0], os.path.join(Globals.root, model[1])
        download_with_progress_bar(model_url, model_dest, "GFPGAN weights")


# ---------------------------------------------
def download_codeformer():
    print("Installing CodeFormer model file...", file=sys.stderr)
    model_url = (
        "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
    )
    model_dest = os.path.join(Globals.root, "models/codeformer/codeformer.pth")
    download_with_progress_bar(model_url, model_dest, "CodeFormer")


# ---------------------------------------------
def download_clipseg():
    print("Installing clipseg model for text-based masking...", file=sys.stderr)
    CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
    try:
        download_from_hf(AutoProcessor, CLIPSEG_MODEL)
        download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL)
    except Exception:
        print("Error installing clipseg model:")
        print(traceback.format_exc())


# -------------------------------------
def download_safety_checker():
    print("Installing model for NSFW content detection...", file=sys.stderr)
    try:
        from diffusers.pipelines.stable_diffusion.safety_checker import (
            StableDiffusionSafetyChecker,
        )
        from transformers import AutoFeatureExtractor
    except ModuleNotFoundError:
        print("Error installing NSFW checker model:")
        print(traceback.format_exc())
        return
    safety_model_id = "CompVis/stable-diffusion-safety-checker"
    print("AutoFeatureExtractor...", file=sys.stderr)
    download_from_hf(AutoFeatureExtractor, safety_model_id)
    print("StableDiffusionSafetyChecker...", file=sys.stderr)
    download_from_hf(StableDiffusionSafetyChecker, safety_model_id)


# -------------------------------------
def download_vaes():
    print("Installing stabilityai VAE...", file=sys.stderr)
    try:
        # first the diffusers version
        repo_id = "stabilityai/sd-vae-ft-mse"
        args = dict(
            cache_dir=global_cache_dir("hub"),
        )
        if not AutoencoderKL.from_pretrained(repo_id, **args):
            raise Exception(f"download of {repo_id} failed")

        repo_id = "stabilityai/sd-vae-ft-mse-original"
        model_name = "vae-ft-mse-840000-ema-pruned.ckpt"
        # next the legacy checkpoint version
        if not hf_download_with_resume(
            repo_id=repo_id,
            model_name=model_name,
            model_dir=str(Globals.root / Model_dir / Weights_dir),
        ):
            raise Exception(f"download of {model_name} failed")
    except Exception as e:
        print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)


# -------------------------------------
def get_root(root: str = None) -> str:
    if root:
        return root
    elif os.environ.get("INVOKEAI_ROOT"):
        return os.environ.get("INVOKEAI_ROOT")
    else:
        return Globals.root

# -------------------------------------
class editOptsForm(npyscreen.FormMultiPage):
    # for responsive resizing - disabled
    # FIX_MINIMUM_SIZE_WHEN_CREATED = False

    def create(self):
        program_opts = self.parentApp.program_opts
        old_opts = self.parentApp.invokeai_opts
        first_time = not (Globals.root / Globals.initfile).exists()
        access_token = HfFolder.get_token()
        window_width, window_height = get_terminal_size()
        for i in [
            "Configure startup settings. You can come back and change these later.",
            "Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.",
            "Use cursor arrows to make a checkbox selection, and space to toggle.",
        ]:
            self.add_widget_intelligent(
                npyscreen.FixedText,
                value=i,
                editable=False,
                color="CONTROL",
            )

        self.nextrely += 1
        self.add_widget_intelligent(
            npyscreen.TitleFixedText,
            name="== BASIC OPTIONS ==",
            begin_entry_at=0,
            editable=False,
            color="CONTROL",
            scroll_exit=True,
        )
        self.nextrely -= 1
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="Select an output directory for images:",
            editable=False,
            color="CONTROL",
        )
        self.outdir = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name="(<tab> autocompletes, ctrl-N advances):",
            value=old_opts.outdir or str(default_output_dir()),
            select_dir=True,
            must_exist=False,
            use_two_lines=False,
            labelColor="GOOD",
            begin_entry_at=40,
            scroll_exit=True,
        )
        self.nextrely += 1
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="Activate the NSFW checker to blur images showing potential sexual imagery:",
            editable=False,
            color="CONTROL",
        )
        self.safety_checker = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="NSFW checker",
            value=old_opts.safety_checker,
            relx=5,
            scroll_exit=True,
        )
        self.nextrely += 1
        for i in [
            "If you have an account at HuggingFace you may paste your access token here",
            'to allow InvokeAI to download styles & subjects from the "Concept Library".',
            "See https://huggingface.co/settings/tokens",
        ]:
            self.add_widget_intelligent(
                npyscreen.FixedText,
                value=i,
                editable=False,
                color="CONTROL",
            )

        self.hf_token = self.add_widget_intelligent(
            npyscreen.TitlePassword,
            name="Access Token (ctrl-shift-V pastes):",
            value=access_token,
            begin_entry_at=42,
            use_two_lines=False,
            scroll_exit=True,
        )
        self.nextrely += 1
        self.add_widget_intelligent(
            npyscreen.TitleFixedText,
            name="== ADVANCED OPTIONS ==",
            begin_entry_at=0,
            editable=False,
            color="CONTROL",
            scroll_exit=True,
        )
        self.nextrely -= 1
        self.add_widget_intelligent(
            npyscreen.TitleFixedText,
            name="GPU Management",
            begin_entry_at=0,
            editable=False,
            color="CONTROL",
            scroll_exit=True,
        )
        self.nextrely -= 1
        self.free_gpu_mem = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Free GPU memory after each generation",
            value=old_opts.free_gpu_mem,
            relx=5,
            scroll_exit=True,
        )
        self.xformers = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Enable xformers support if available",
            value=old_opts.xformers,
            relx=5,
            scroll_exit=True,
        )
        self.ckpt_convert = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Load legacy checkpoint models into memory as diffusers models",
            value=old_opts.ckpt_convert,
            relx=5,
            scroll_exit=True,
        )
        self.always_use_cpu = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="Force CPU to be used on GPU systems",
            value=old_opts.always_use_cpu,
            relx=5,
            scroll_exit=True,
        )
        precision = old_opts.precision or (
            "float32" if program_opts.full_precision else "auto"
        )
        self.precision = self.add_widget_intelligent(
            npyscreen.TitleSelectOne,
            name="Precision",
            values=PRECISION_CHOICES,
            value=PRECISION_CHOICES.index(precision),
            begin_entry_at=3,
            max_height=len(PRECISION_CHOICES) + 1,
            scroll_exit=True,
        )
        self.max_loaded_models = self.add_widget_intelligent(
            IntTitleSlider,
            name="Number of models to cache in CPU memory (each will use 2-4 GB!)",
            value=old_opts.max_loaded_models,
            out_of=10,
            lowest=1,
            begin_entry_at=4,
            scroll_exit=True,
        )
        self.nextrely += 1
        self.add_widget_intelligent(
            npyscreen.FixedText,
            value="Directory containing embedding/textual inversion files:",
            editable=False,
            color="CONTROL",
        )
        self.embedding_path = self.add_widget_intelligent(
            npyscreen.TitleFilename,
            name="(<tab> autocompletes, ctrl-N advances):",
            value=str(default_embedding_dir()),
            select_dir=True,
            must_exist=False,
            use_two_lines=False,
            labelColor="GOOD",
            begin_entry_at=40,
            scroll_exit=True,
        )
        self.nextrely += 1
        self.add_widget_intelligent(
            npyscreen.TitleFixedText,
            name="== LICENSE ==",
            begin_entry_at=0,
            editable=False,
            color="CONTROL",
            scroll_exit=True,
        )
        self.nextrely -= 1
        for i in [
            "BY DOWNLOADING THE STABLE DIFFUSION WEIGHT FILES, YOU AGREE TO HAVE READ",
            "AND ACCEPTED THE CREATIVEML RESPONSIBLE AI LICENSE LOCATED AT",
            "https://huggingface.co/spaces/CompVis/stable-diffusion-license",
        ]:
            self.add_widget_intelligent(
                npyscreen.FixedText,
                value=i,
                editable=False,
                color="CONTROL",
            )
        self.license_acceptance = self.add_widget_intelligent(
            npyscreen.Checkbox,
            name="I accept the CreativeML Responsible AI License",
            value=not first_time,
            relx=2,
            scroll_exit=True,
        )
        self.nextrely += 1
        label = (
            "DONE"
            if program_opts.skip_sd_weights or program_opts.default_only
            else "NEXT"
        )
        self.ok_button = self.add_widget_intelligent(
            CenteredButtonPress,
            name=label,
            relx=(window_width - len(label)) // 2,
            rely=-3,
            when_pressed_function=self.on_ok,
        )

    def on_ok(self):
        options = self.marshall_arguments()
        if self.validate_field_values(options):
            self.parentApp.new_opts = options
            if hasattr(self.parentApp, "model_select"):
                self.parentApp.setNextForm("MODELS")
            else:
                self.parentApp.setNextForm(None)
            self.editing = False
        else:
            self.editing = True

    def validate_field_values(self, opt: Namespace) -> bool:
        bad_fields = []
        if not opt.license_acceptance:
            bad_fields.append(
                "Please accept the license terms before proceeding to model downloads"
            )
        if not Path(opt.outdir).parent.exists():
            bad_fields.append(
                f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory."
            )
        if not Path(opt.embedding_path).parent.exists():
            bad_fields.append(
                f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_path).parent)} is an existing directory."
            )
        if len(bad_fields) > 0:
            message = "The following problems were detected and must be corrected:\n"
            for problem in bad_fields:
                message += f"* {problem}\n"
            npyscreen.notify_confirm(message)
            return False
        else:
            return True

    def marshall_arguments(self):
        new_opts = Namespace()

        for attr in [
            "outdir",
            "safety_checker",
            "free_gpu_mem",
            "max_loaded_models",
            "xformers",
            "always_use_cpu",
            "embedding_path",
            "ckpt_convert",
        ]:
            setattr(new_opts, attr, getattr(self, attr).value)

        new_opts.hf_token = self.hf_token.value
        new_opts.license_acceptance = self.license_acceptance.value
        new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]

        return new_opts

class EditOptApplication(npyscreen.NPSAppManaged):
    def __init__(self, program_opts: Namespace, invokeai_opts: Namespace):
        super().__init__()
        self.program_opts = program_opts
        self.invokeai_opts = invokeai_opts
        self.user_cancelled = False
        self.user_selections = default_user_selections(program_opts)

    def onStart(self):
        npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
        self.options = self.addForm(
            "MAIN",
            editOptsForm,
            name="InvokeAI Startup Options",
        )
        if not (self.program_opts.skip_sd_weights or self.program_opts.default_only):
            self.model_select = self.addForm(
                "MODELS",
                addModelsForm,
                name="Install Stable Diffusion Models",
                multipage=True,
            )

    def new_opts(self):
        return self.options.marshall_arguments()


def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Namespace:
    editApp = EditOptApplication(program_opts, invokeai_opts)
    editApp.run()
    return editApp.new_opts()


def default_startup_options(init_file: Path) -> Namespace:
    opts = Args().parse_args([])
    outdir = Path(opts.outdir)
    if not outdir.is_absolute():
        opts.outdir = str(Globals.root / opts.outdir)
    if not init_file.exists():
        opts.safety_checker = True
    return opts


def default_user_selections(program_opts: Namespace) -> Namespace:
    return Namespace(
        starter_models=default_dataset()
        if program_opts.default_only
        else recommended_datasets()
        if program_opts.yes_to_all
        else dict(),
        purge_deleted_models=False,
        scan_directory=None,
        autoscan_on_startup=None,
        import_model_paths=None,
        convert_to_diffusers=None,
    )


# -------------------------------------
def initialize_rootdir(root: str, yes_to_all: bool = False):
    print("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")

    for name in (
        "models",
        "configs",
        "embeddings",
        "text-inversion-output",
        "text-inversion-training-data",
    ):
        os.makedirs(os.path.join(root, name), exist_ok=True)

    configs_src = Path(configs.__path__[0])
    configs_dest = Path(root) / "configs"
    if not os.path.samefile(configs_src, configs_dest):
        shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)


# -------------------------------------
def run_console_ui(
    program_opts: Namespace, initfile: Path = None
) -> tuple[Namespace, Namespace]:
    # parse_args() will read from init file if present
    invokeai_opts = default_startup_options(initfile)

    set_min_terminal_size(MIN_COLS, MIN_LINES)
    editApp = EditOptApplication(program_opts, invokeai_opts)
    editApp.run()
    if editApp.user_cancelled:
        return (None, None)
    else:
        # new_opts() must be called to marshal the edited values out of the form
        return (editApp.new_opts(), editApp.user_selections)


# -------------------------------------
def write_opts(opts: Namespace, init_file: Path):
    """
    Update the invokeai.init file with values from opts Namespace
    """
    # touch file if it doesn't exist
    if not init_file.exists():
        with open(init_file, "w") as f:
            f.write(INIT_FILE_PREAMBLE)

    # We want to write in the changed arguments without clobbering
    # any other initialization values the user has entered. There is
    # no good way to do this because of the one-way nature of
    # argparse: i.e. --outdir could be --outdir, --out, or -o
    # initfile needs to be replaced with a fully structured format
    # such as yaml; this is a hack that will work much of the time
    args_to_skip = re.compile(
        "^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
    )
    # fix windows paths
    opts.outdir = opts.outdir.replace("\\", "/")
    opts.embedding_path = opts.embedding_path.replace("\\", "/")
    new_file = f"{init_file}.new"
    try:
        lines = [x.strip() for x in open(init_file, "r").readlines()]
        with open(new_file, "w") as out_file:
            for line in lines:
                if len(line) > 0 and not args_to_skip.match(line):
                    out_file.write(line + "\n")
            out_file.write(
                f"""
--outdir={opts.outdir}
--embedding_path={opts.embedding_path}
--precision={opts.precision}
--max_loaded_models={int(opts.max_loaded_models)}
--{'no-' if not opts.safety_checker else ''}nsfw_checker
--{'no-' if not opts.xformers else ''}xformers
--{'no-' if not opts.ckpt_convert else ''}ckpt_convert
{'--free_gpu_mem' if opts.free_gpu_mem else ''}
{'--always_use_cpu' if opts.always_use_cpu else ''}
"""
            )
    except OSError as e:
        print(f"** An error occurred while writing the init file: {str(e)}")

    os.replace(new_file, init_file)

    if opts.hf_token:
        HfLogin(opts.hf_token)

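
The comment block in `write_opts` is the crux of the function: because argparse flags can be spelled several ways, old lines are filtered out with a prefix regex and the canonical settings are re-appended. A quick illustrative check of that filter (the sample lines are invented):

import re

args_to_skip = re.compile(
    "^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
)
kept = [
    line
    for line in ["--outdir=/tmp/images", "--web --host=0.0.0.0", "--steps=20"]
    if not args_to_skip.match(line)
]
# "--outdir=..." is dropped (it gets re-written), the other two survive
assert kept == ["--web --host=0.0.0.0", "--steps=20"]
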
# -------------------------------------
def default_output_dir() -> Path:
    return Globals.root / "outputs"


# -------------------------------------
def default_embedding_dir() -> Path:
    return Globals.root / "embeddings"


# -------------------------------------
def write_default_options(program_opts: Namespace, initfile: Path):
    opt = default_startup_options(initfile)
    opt.hf_token = HfFolder.get_token()
    write_opts(opt, initfile)


# -------------------------------------
def main():
    parser = argparse.ArgumentParser(description="InvokeAI model downloader")
    parser.add_argument(
        "--skip-sd-weights",
        dest="skip_sd_weights",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="skip downloading the large Stable Diffusion weight files",
    )
    parser.add_argument(
        "--skip-support-models",
        dest="skip_support_models",
        action=argparse.BooleanOptionalAction,
        default=False,
        help="skip downloading the support models",
    )
    parser.add_argument(
        "--full-precision",
        dest="full_precision",
        action=argparse.BooleanOptionalAction,
        type=bool,
        default=False,
        help="use 32-bit weights instead of faster 16-bit weights",
    )
    parser.add_argument(
        "--yes",
        "-y",
        dest="yes_to_all",
        action="store_true",
        help='answer "yes" to all prompts',
    )
    parser.add_argument(
        "--default_only",
        action="store_true",
        help="when --yes specified, only install the default model",
    )
    parser.add_argument(
        "--config_file",
        "-c",
        dest="config_file",
        type=str,
        default=None,
        help="path to configuration file to create",
    )
    parser.add_argument(
        "--root_dir",
        dest="root",
        type=str,
        default=None,
        help="path to root of install directory",
    )
    opt = parser.parse_args()

    # setting a global here
    Globals.root = Path(os.path.expanduser(get_root(opt.root) or ""))

    errors = set()

    try:
        models_to_download = default_user_selections(opt)

        # We check to see if the runtime directory is correctly initialized.
        init_file = Path(Globals.root, Globals.initfile)
        if not init_file.exists() or not global_config_file().exists():
            initialize_rootdir(Globals.root, opt.yes_to_all)

        if opt.yes_to_all:
            write_default_options(opt, init_file)
            init_options = Namespace(
                precision="float32" if opt.full_precision else "float16"
            )
        else:
            init_options, models_to_download = run_console_ui(opt, init_file)
            if init_options:
                write_opts(init_options, init_file)
            else:
                print(
                    '\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n'
                )
                sys.exit(0)

        if opt.skip_support_models:
            print("\n** SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST **")
        else:
            print("\n** DOWNLOADING SUPPORT MODELS **")
            download_bert()
            download_sd1_clip()
            download_sd2_clip()
            download_realesrgan()
            download_gfpgan()
            download_codeformer()
            download_clipseg()
            download_safety_checker()
            download_vaes()

        if opt.skip_sd_weights:
            print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
        elif models_to_download:
            print("\n** DOWNLOADING DIFFUSION WEIGHTS **")
            process_and_execute(opt, models_to_download)

        postscript(errors=errors)
    except KeyboardInterrupt:
        print("\nGoodbye! Come back soon.")


# -------------------------------------
if __name__ == "__main__":
    main()
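
For reference, typical invocations of this script through its `invokeai-configure` entry point (the name the init-file preamble above refers to) would be:

invokeai-configure                       # interactive TUI
invokeai-configure --yes --default_only  # unattended; install only the default model
invokeai-configure --skip-sd-weights     # configure without the large weight files
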
invokeai/backend/config/model_install_backend.py (new file, 465 lines)
@@ -0,0 +1,465 @@
"""
Utility (backend) functions used by model_install.py
"""
import os
import re
import shutil
import sys
import warnings
from pathlib import Path
from tempfile import TemporaryFile
from typing import List

import requests
from diffusers import AutoencoderKL
from huggingface_hub import hf_hub_url
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from tqdm import tqdm

import invokeai.configs as configs

from ..globals import Globals, global_cache_dir, global_config_dir
from ..model_management import ModelManager
from ..stable_diffusion import StableDiffusionGeneratorPipeline

warnings.filterwarnings("ignore")

# --------------------------globals-----------------------
Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"

# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"

# initial models omegaconf
Datasets = None

Config_preamble = """
# This file describes the alternative machine learning models
# available to InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
"""


def default_config_file():
    return Path(global_config_dir()) / "models.yaml"


def sd_configs():
    return Path(global_config_dir()) / "stable-diffusion"


def initial_models():
    global Datasets
    if Datasets:
        return Datasets
    return (Datasets := OmegaConf.load(Dataset_path))

def install_requested_models(
    install_initial_models: List[str] = None,
    remove_models: List[str] = None,
    scan_directory: Path = None,
    external_models: List[str] = None,
    scan_at_startup: bool = False,
    convert_to_diffusers: bool = False,
    precision: str = "float16",
    purge_deleted: bool = False,
    config_file_path: Path = None,
):
    """
    Entry point for installing/deleting starter models, or installing external models.
    """
    config_file_path = config_file_path or default_config_file()
    if not config_file_path.exists():
        open(config_file_path, "w")

    model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision)

    if remove_models and len(remove_models) > 0:
        print("== DELETING UNCHECKED STARTER MODELS ==")
        for model in remove_models:
            print(f"{model}...")
            model_manager.del_model(model, delete_files=purge_deleted)
        model_manager.commit(config_file_path)

    if install_initial_models and len(install_initial_models) > 0:
        print("== INSTALLING SELECTED STARTER MODELS ==")
        successfully_downloaded = download_weight_datasets(
            models=install_initial_models,
            access_token=None,
            precision=precision,
        )  # FIX: for historical reasons, we don't use model manager here
        update_config_file(successfully_downloaded, config_file_path)
        if len(successfully_downloaded) < len(install_initial_models):
            print("** Some of the model downloads were not successful")

    # due to above, we have to reload the model manager because conf file
    # was changed behind its back
    model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision)

    external_models = external_models or list()
    if scan_directory:
        external_models.append(str(scan_directory))

    if len(external_models) > 0:
        print("== INSTALLING EXTERNAL MODELS ==")
        for path_url_or_repo in external_models:
            try:
                model_manager.heuristic_import(
                    path_url_or_repo,
                    convert=convert_to_diffusers,
                    commit_to_conf=config_file_path,
                )
            except KeyboardInterrupt:
                sys.exit(-1)
            except Exception:
                pass

    if scan_at_startup and scan_directory.is_dir():
        argument = "--autoconvert" if convert_to_diffusers else "--autoimport"
        initfile = Path(Globals.root, Globals.initfile)
        replacement = Path(Globals.root, f"{Globals.initfile}.new")
        directory = str(scan_directory).replace("\\", "/")
        with open(initfile, "r") as input:
            with open(replacement, "w") as output:
                while line := input.readline():
                    if not line.startswith(argument):
                        output.writelines([line])
                output.writelines([f"{argument} {directory}"])
        os.replace(replacement, initfile)

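
A hedged usage sketch for the entry point above, roughly as the configure script's model-selection form would drive it (the model key matches the one this module references from INITIAL_MODELS.yaml; the scan directory is invented):

from pathlib import Path

install_requested_models(
    install_initial_models=["stable-diffusion-1.4"],
    scan_directory=Path("/data/ckpts"),  # hypothetical local folder of checkpoints
    convert_to_diffusers=True,
    precision="float16",
)
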
# -------------------------------------
def yes_or_no(prompt: str, default_yes=True):
    default = "y" if default_yes else "n"
    response = input(f"{prompt} [{default}] ") or default
    if default_yes:
        return response[0] not in ("n", "N")
    else:
        return response[0] in ("y", "Y")


# -------------------------------------
def get_root(root: str = None) -> str:
    if root:
        return root
    elif os.environ.get("INVOKEAI_ROOT"):
        return os.environ.get("INVOKEAI_ROOT")
    else:
        return Globals.root


# ---------------------------------------------
def recommended_datasets() -> dict:
    datasets = dict()
    for ds in initial_models().keys():
        if initial_models()[ds].get("recommended", False):
            datasets[ds] = True
    return datasets


# ---------------------------------------------
def default_dataset() -> dict:
    datasets = dict()
    for ds in initial_models().keys():
        if initial_models()[ds].get("default", False):
            datasets[ds] = True
    return datasets


# ---------------------------------------------
def all_datasets() -> dict:
    datasets = dict()
    for ds in initial_models().keys():
        datasets[ds] = True
    return datasets

# ---------------------------------------------
# look for legacy model.ckpt in models directory and offer to
# normalize its name
def migrate_models_ckpt():
    model_path = os.path.join(Globals.root, Model_dir, Weights_dir)
    if not os.path.exists(os.path.join(model_path, "model.ckpt")):
        return
    new_name = initial_models()["stable-diffusion-1.4"]["file"]
    print(
        f'The Stable Diffusion v1.4 "model.ckpt" is already installed. The name will be changed to {new_name} to avoid confusion.'
    )
    print(f"model.ckpt => {new_name}")
    os.replace(
        os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name)
    )

# ---------------------------------------------
def download_weight_datasets(
    models: List[str], access_token: str, precision: str = "float32"
):
    migrate_models_ckpt()
    successful = dict()
    for mod in models:
        print(f"Downloading {mod}:")
        successful[mod] = _download_repo_or_file(
            initial_models()[mod], access_token, precision=precision
        )
    return successful


def _download_repo_or_file(
    mconfig: DictConfig, access_token: str, precision: str = "float32"
) -> Path:
    path = None
    if mconfig["format"] == "ckpt":
        path = _download_ckpt_weights(mconfig, access_token)
    else:
        path = _download_diffusion_weights(mconfig, access_token, precision=precision)
        if "vae" in mconfig and "repo_id" in mconfig["vae"]:
            _download_diffusion_weights(
                mconfig["vae"], access_token, precision=precision
            )
    return path


def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
    repo_id = mconfig["repo_id"]
    filename = mconfig["file"]
    cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir)
    return hf_download_with_resume(
        repo_id=repo_id,
        model_dir=cache_dir,
        model_name=filename,
        access_token=access_token,
    )


# ---------------------------------------------
def download_from_hf(
    model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
):
    path = global_cache_dir(cache_subdir)
    model = model_class.from_pretrained(
        model_name,
        cache_dir=path,
        resume_download=True,
        **kwargs,
    )
    model_name = "--".join(("models", *model_name.split("/")))
    return path / model_name if model else None

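
`download_from_hf` returns the huggingface_hub cache directory for the repo, whose name joins `models` with the repo-id segments using `--`. The naming rule in isolation:

# mirrors the last two lines of download_from_hf above
def hub_cache_name(repo_id: str) -> str:
    return "--".join(("models", *repo_id.split("/")))

assert hub_cache_name("openai/clip-vit-large-patch14") == "models--openai--clip-vit-large-patch14"
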
def _download_diffusion_weights(
    mconfig: DictConfig, access_token: str, precision: str = "float32"
):
    repo_id = mconfig["repo_id"]
    model_class = (
        StableDiffusionGeneratorPipeline
        if mconfig.get("format", None) == "diffusers"
        else AutoencoderKL
    )
    extra_arg_list = [{"revision": "fp16"}, {}] if precision == "float16" else [{}]
    path = None
    for extra_args in extra_arg_list:
        try:
            path = download_from_hf(
                model_class,
                repo_id,
                safety_checker=None,
                **extra_args,
            )
        except OSError as e:
            if str(e).startswith("fp16 is not a valid"):
                pass
            else:
                print(f"An unexpected error occurred while downloading the model: {e}")
        if path:
            break
    return path

# ---------------------------------------------
def hf_download_with_resume(
    repo_id: str, model_dir: str, model_name: str, access_token: str = None
) -> Path:
    model_dest = Path(os.path.join(model_dir, model_name))
    os.makedirs(model_dir, exist_ok=True)

    url = hf_hub_url(repo_id, model_name)

    header = {"Authorization": f"Bearer {access_token}"} if access_token else {}
    open_mode = "wb"
    exist_size = 0

    if os.path.exists(model_dest):
        exist_size = os.path.getsize(model_dest)
        header["Range"] = f"bytes={exist_size}-"
        open_mode = "ab"

    resp = requests.get(url, headers=header, stream=True)
    total = int(resp.headers.get("content-length", 0))

    if (
        resp.status_code == 416
    ):  # "range not satisfiable", which means nothing to return
        print(f"* {model_name}: complete file found. Skipping.")
        return model_dest
    elif resp.status_code != 200:
        print(f"** An error occurred during downloading {model_name}: {resp.reason}")
    elif exist_size > 0:
        print(f"* {model_name}: partial file found. Resuming...")
    else:
        print(f"* {model_name}: Downloading...")

    try:
        if total < 2000:
            print(f"*** ERROR DOWNLOADING {model_name}: {resp.text}")
            return None

        with open(model_dest, open_mode) as file, tqdm(
            desc=model_name,
            initial=exist_size,
            total=total + exist_size,
            unit="iB",
            unit_scale=True,
            unit_divisor=1000,
        ) as bar:
            for data in resp.iter_content(chunk_size=1024):
                size = file.write(data)
                bar.update(size)
    except Exception as e:
        print(f"An error occurred while downloading {model_name}: {str(e)}")
        return None
    return model_dest

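
A minimal usage sketch for the resumable downloader above (the repo and file are ones this module actually installs; re-running with a partial file present sends an HTTP Range header and appends to it):

dest = hf_download_with_resume(
    repo_id="stabilityai/sd-vae-ft-mse-original",
    model_dir="/tmp/models",  # illustrative destination directory
    model_name="vae-ft-mse-840000-ema-pruned.ckpt",
)
if dest is None:
    print("download failed; see messages above")
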
# ---------------------------------------------
def update_config_file(successfully_downloaded: dict, config_file: Path):
    config_file = (
        Path(config_file) if config_file is not None else default_config_file()
    )

    # In some cases (incomplete setup, etc), the default configs directory might be missing.
    # Create it if it doesn't exist.
    # this check is ignored if opt.config_file is specified - user is assumed to know what they
    # are doing if they are passing a custom config file from elsewhere.
    if config_file == default_config_file() and not config_file.parent.exists():
        configs_src = Dataset_path.parent
        configs_dest = default_config_file().parent
        shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)

    yaml = new_config_file_contents(successfully_downloaded, config_file)

    try:
        backup = None
        if os.path.exists(config_file):
            print(
                f"** {config_file.name} exists. Renaming to {config_file.stem}.yaml.orig"
            )
            backup = config_file.with_suffix(".yaml.orig")
            ## Ugh. Windows is unable to overwrite an existing backup file, raises a WinError 183
            if sys.platform == "win32" and backup.is_file():
                backup.unlink()
            config_file.rename(backup)

        with TemporaryFile() as tmp:
            tmp.write(Config_preamble.encode())
            tmp.write(yaml.encode())

            with open(str(config_file.expanduser().resolve()), "wb") as new_config:
                tmp.seek(0)
                new_config.write(tmp.read())

    except Exception as e:
        print(f"**Error creating config file {config_file}: {str(e)} **")
        if backup is not None:
            print("restoring previous config file")
            ## workaround, for WinError 183, see above
            if sys.platform == "win32" and config_file.is_file():
                config_file.unlink()
            backup.rename(config_file)
        return

    print(f"Successfully created new configuration file {config_file}")

# ---------------------------------------------
def new_config_file_contents(
    successfully_downloaded: dict,
    config_file: Path,
) -> str:
    if config_file.exists():
        conf = OmegaConf.load(str(config_file.expanduser().resolve()))
    else:
        conf = OmegaConf.create()

    default_selected = None
    for model in successfully_downloaded:
        # a bit hacky - what we are doing here is seeing whether a checkpoint
        # version of the model was previously defined, and whether the current
        # model is a diffusers (indicated with a path)
        if conf.get(model) and Path(successfully_downloaded[model]).is_dir():
            delete_weights(model, conf[model])

        stanza = {}
        mod = initial_models()[model]
        stanza["description"] = mod["description"]
        stanza["repo_id"] = mod["repo_id"]
        stanza["format"] = mod["format"]
        # diffusers don't need width and height (probably .ckpt doesn't either)
        # so we no longer require these in INITIAL_MODELS.yaml
        if "width" in mod:
            stanza["width"] = mod["width"]
        if "height" in mod:
            stanza["height"] = mod["height"]
        if "file" in mod:
            stanza["weights"] = os.path.relpath(
                successfully_downloaded[model], start=Globals.root
            )
            stanza["config"] = os.path.normpath(
                os.path.join(sd_configs(), mod["config"])
            )
        if "vae" in mod:
            if "file" in mod["vae"]:
                stanza["vae"] = os.path.normpath(
                    os.path.join(Model_dir, Weights_dir, mod["vae"]["file"])
                )
            else:
                stanza["vae"] = mod["vae"]
        if mod.get("default", False):
            stanza["default"] = True
            default_selected = True

        conf[model] = stanza

    # if no default model was chosen, then we select the first
    # one in the list
    if not default_selected:
        conf[list(successfully_downloaded.keys())[0]]["default"] = True

    return OmegaConf.to_yaml(conf)

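
To make the emitted shape concrete, here is a sketch that builds one stanza the way `new_config_file_contents` does and prints the resulting YAML (the field values are illustrative, not taken from INITIAL_MODELS.yaml):

from omegaconf import OmegaConf

conf = OmegaConf.create()
conf["stable-diffusion-1.4"] = {
    "description": "Stable Diffusion version 1.4",  # illustrative
    "repo_id": "CompVis/stable-diffusion-v-1-4-original",  # illustrative
    "format": "ckpt",
    "width": 512,
    "height": 512,
    "weights": "models/ldm/stable-diffusion-v1/sd-v1-4.ckpt",
    "config": "configs/stable-diffusion/v1-inference.yaml",
    "default": True,
}
print(OmegaConf.to_yaml(conf))
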
# ---------------------------------------------
def delete_weights(model_name: str, conf_stanza: dict):
    if not (weights := conf_stanza.get("weights")):
        return
    # re.search, not re.match: "/VAE/" can occur anywhere in the config path;
    # shared VAE checkpoints must never be deleted
    if re.search("/VAE/", conf_stanza.get("config")):
        return

    print(
        f"\n** The checkpoint version of {model_name} is superseded by the diffusers version. Deleting the original file {weights}"
    )

    weights = Path(weights)
    if not weights.is_absolute():
        weights = Path(Globals.root) / weights
    try:
        weights.unlink()
    except OSError as e:
        print(str(e))